Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 180
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_dp_i2c_helper.c (renamed from drivers/gpu/drm/i915/intel_dp_i2c.c) | 76
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 42
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 328
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 112
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 130
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 110
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 28
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 15
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 531
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_mode.c | 473
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_priv.h | 344
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 9
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 16
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 37
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 120
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 163
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 92
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 137
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1036
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 162
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.h | 144
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 142
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 1416
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 44
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 125
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 6095
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 289
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 671
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 478
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 468
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 824
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 206
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 157
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 569
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 405
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 1286
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 91
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 380
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 262
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 992
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 1080
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.h | 455
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 269
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.h | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 702
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 568
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 196
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 1294
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 836
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 321
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 811
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 1002
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_cursor.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 528
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 621
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 288
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fb.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 316
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 271
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 579
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 208
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_mc.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_timer.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 305
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 260
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 892
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_gpio.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 681
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.h | 156
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv_modes.c | 583
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 780
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 314
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 560
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mc.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 769
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 156
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 304
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 1015
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 273
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 494
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 385
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 509
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mc.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 309
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h | 535
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 18
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 790
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 245
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1151
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 212
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 165
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 332
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 688
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 194
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 145
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 276
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fixed.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 182
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 125
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 149
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 565
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 157
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 60
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 96
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 236
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 112
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 83
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 649
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 117
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 311
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 452
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
185 files changed, 46807 insertions, 3017 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3c8827a7aab..470ef6779db 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -31,3 +31,5 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
+obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5cae0b3eee9..5124401f266 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+ drm_dirty_info_enum_list)
+
struct drm_conn_prop_enum_list {
int type;
char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
@@ -272,7 +282,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
* functions & device file and adds it to the master fd list.
*
* RETURNS:
- * Zero on success, error code on falure.
+ * Zero on success, error code on failure.
*/
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
@@ -802,6 +812,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dithering_property);
/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+ int i;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM |
+ DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ ARRAY_SIZE(drm_dirty_info_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+ drm_property_add_enum(dirty_info, i,
+ drm_dirty_info_enum_list[i].type,
+ drm_dirty_info_enum_list[i].name);
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -1753,6 +1793,71 @@ out:
return ret;
}
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out_err1;
+ }
+ fb = obj_to_fb(obj);
+
+ num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+		clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+ if (!clips) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+		if (copy_from_user(clips, clips_ptr,
+				   num_clips * sizeof(*clips))) {
+			ret = -EFAULT;
+			goto out_err2;
+		}
+ }
+
+ if (fb->funcs->dirty) {
+ ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ } else {
+ ret = -ENOSYS;
+ goto out_err2;
+ }
+
+out_err2:
+ kfree(clips);
+out_err1:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+
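The new DIRTYFB ioctl lets userspace flush damaged regions of a framebuffer that is not continuously scanned out (e.g. virtual hardware). A minimal, hypothetical userspace sketch — assuming libdrm's drmIoctl() wrapper and the structures from drm_mode.h, with fd and fb_id already set up:

    struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
    struct drm_mode_fb_dirty_cmd cmd = {
            .fb_id     = fb_id,                            /* framebuffer to flush */
            .num_clips = 1,
            .clips_ptr = (uint64_t)(unsigned long)&clip,   /* user pointer as u64 */
    };
    drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);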
/**
* drm_fb_release - remove and free the FBs on this file
* @filp: file * from the ioctl
@@ -2328,7 +2433,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
- /* store the property value if succesful */
+ /* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, out_resp->value);
out:
@@ -2478,3 +2583,72 @@ out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ goto out;
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj)
+ goto out;
+ fb = obj_to_fb(obj);
+
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = -ENOMEM;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy =
+ (void (*) (struct drm_pending_event *)) kfree;
+ }
+
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret) {
+		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			/* only refund event space if we reserved it above */
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			kfree(e);
+		}
+	}
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
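When DRM_MODE_PAGE_FLIP_EVENT is set, the ioctl reserves event space up front and the driver delivers a DRM_EVENT_FLIP_COMPLETE event once the flip has actually taken effect. A hypothetical userspace sketch (drmIoctl() from libdrm; fd, crtc_id and fb_id assumed set up):

    struct drm_mode_crtc_page_flip flip = {
            .crtc_id   = crtc_id,
            .fb_id     = fb_id,
            .flags     = DRM_MODE_PAGE_FLIP_EVENT,
            .user_data = 0,
    };
    drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);

    char buf[1024];
    ssize_t len = read(fd, buf, sizeof buf);   /* blocks until the flip completes */
    struct drm_event *ev = (struct drm_event *)buf;
    if (len >= (ssize_t)sizeof *ev && ev->type == DRM_EVENT_FLIP_COMPLETE)
            ;   /* the new framebuffer is now being scanned out */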
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bbfd110a716..4231d6db72e 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
count = (*connector_funcs->get_modes)(connector);
if (!count) {
- count = drm_add_modes_noedid(connector, 800, 600);
+ count = drm_add_modes_noedid(connector, 1024, 768);
if (!count)
return 0;
}
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index a63b6f57d2d..548887c8506 100644
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -28,84 +28,20 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
#include "drmP.h"
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START 1
-#define MODE_I2C_WRITE 2
-#define MODE_I2C_READ 4
-#define MODE_I2C_STOP 8
-
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- uint16_t address = algo_data->address;
- uint8_t msg[5];
- uint8_t reply[2];
- int msg_bytes;
- int reply_bytes;
int ret;
-
- /* Set up the command byte */
- if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
- else
- msg[0] = AUX_I2C_WRITE << 4;
-
- if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
-
- msg[1] = address >> 8;
- msg[2] = address;
-
- switch (mode) {
- case MODE_I2C_WRITE:
- msg[3] = 0;
- msg[4] = write_byte;
- msg_bytes = 5;
- reply_bytes = 1;
- break;
- case MODE_I2C_READ:
- msg[3] = 0;
- msg_bytes = 4;
- reply_bytes = 2;
- break;
- default:
- msg_bytes = 3;
- reply_bytes = 1;
- break;
- }
-
- for (;;) {
- ret = (*algo_data->aux_ch)(adapter,
- msg, msg_bytes,
- reply, reply_bytes);
- if (ret < 0) {
- DRM_DEBUG("aux_ch failed %d\n", ret);
- return ret;
- }
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
- if (mode == MODE_I2C_READ) {
- *read_byte = reply[1];
- }
- return reply_bytes - 1;
- case AUX_I2C_REPLY_NACK:
- DRM_DEBUG("aux_ch nack\n");
- return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG("aux_ch defer\n");
- udelay(100);
- break;
- default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
- return -EREMOTEIO;
- }
- }
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
}
/*
@@ -224,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
if (ret >= 0)
ret = num;
i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63deea..ff2f1042cb4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -366,6 +368,29 @@ module_init(drm_core_init);
module_exit(drm_core_exit);
/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know exact length of driver value (which could be
+ * larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
* Get version information
*
* \param inode device inode.
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
- int len;
+ int err;
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
- DRM_COPY(version->name, dev->driver->name);
- DRM_COPY(version->date, dev->driver->date);
- DRM_COPY(version->desc, dev->driver->desc);
-
- return 0;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
}
/**
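drm_copy_field implements the usual two-pass string ioctl pattern: the kernel always writes back the real length, and copies only as many bytes as the caller provided room for. A hypothetical userspace sketch of consuming DRM_IOCTL_VERSION this way (roughly what libdrm's drmGetVersion() does):

    struct drm_version v = { 0 };
    drmIoctl(fd, DRM_IOCTL_VERSION, &v);    /* first pass: lengths only */
    v.name = malloc(v.name_len + 1);
    drmIoctl(fd, DRM_IOCTL_VERSION, &v);    /* second pass: copies the string */
    v.name[v.name_len] = '\0';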
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b54ba63d506..c39b26f1abe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
*/
static bool edid_is_valid(struct edid *edid)
{
- int i;
+ int i, score = 0;
u8 csum = 0;
u8 *raw_edid = (u8 *)edid;
- if (memcmp(edid->header, edid_header, sizeof(edid_header)))
- goto bad;
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+	if (score == 8) {
+		/* perfect header match, nothing to fix */
+	} else if (score >= 6) {
+		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+		memcpy(raw_edid, edid_header, sizeof(edid_header));
+	} else {
+		goto bad;
+	}
- }
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
goto bad;
}
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
return 1;
bad:
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
- int i, count;
+ int i;
struct drm_display_mode *ptr, *mode;
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
mode = NULL;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < drm_num_dmt_modes; i++) {
ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+ struct detailed_data_monitor_range *range;
+ int hsync, vrefresh;
+
+ range = &timing->data.other_data.data.range;
+
+ hsync = drm_mode_hsync(mode);
+ vrefresh = drm_mode_vrefresh(mode);
+
+ if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return false;
+
+ if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ return false;
+
+ if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+ /* be forgiving since it's in units of 10MHz */
+ int max_clock = range->pixel_clock_mhz * 10 + 9;
+ max_clock *= 1000;
+ if (mode->clock > max_clock)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ if (mode_in_range(drm_dmt_modes + i, timing)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+
+ for (i = 0; i < 4; i++) {
+ int width, height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+		/* 12-bit vertical size: low 8 bits in code[0], high 4 bits in
+		 * the top nibble of code[1], hence the shift by 4; the aspect
+		 * ratio lives in bits 3:2 of code[1] */
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+		switch (cvt->code[1] & 0x0c) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x04:
+			width = height * 16 / 9;
+			break;
+		case 0x08:
+			width = height * 16 / 10;
+			break;
+		case 0x0c:
+			width = height * 15 / 9;
+			break;
+		}
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
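A worked example of the 3-byte decode above (values chosen for illustration): a descriptor of { 0x1b, 0x24, 0x08 } gives

    height = (0x1b + ((0x24 & 0xf0) << 4) + 1) * 2 = (27 + 512 + 1) * 2 = 1080
    aspect = 0x24 & 0x0c = 0x04  ->  16:9, so width = 1080 * 16 / 9 = 1920
    code[2] bit 3 set            ->  add a 60Hz mode

i.e. a single 3-byte code can request 1920x1080@60.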
+static int add_detailed_modes(struct drm_connector *connector,
+ struct detailed_timing *timing,
+ struct edid *edid, u32 quirks, int preferred)
+{
+ int i, modes = 0;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int timing_level = standard_timing_level(edid);
+ int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(dev, edid, timing, quirks);
+ if (!newmode)
+ return 0;
+
+ if (preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, newmode);
+ return 1;
+ }
+
+ /* other timing types */
+ switch (data->type) {
+ case EDID_DETAIL_MONITOR_RANGE:
+ if (gtf)
+ modes += drm_gtf_modes_for_range(connector, timing);
+ break;
+ case EDID_DETAIL_STD_MODES:
+ /* Six modes per detailed section */
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(dev, std, edid->revision,
+ timing_level);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ break;
+ case EDID_DETAIL_CVT_3BYTE:
+ modes += drm_cvt_modes(connector, timing);
+ break;
+ default:
+ break;
+ }
+
+ return modes;
+}
+
/**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
+ int i, modes = 0;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- struct detailed_non_pixel *data = &timing->data.other_data;
- struct drm_display_mode *newmode;
-
- /* X server check is version 1.1 or higher */
- if (edid->version == 1 && edid->revision >= 1 &&
- !timing->pixel_clock) {
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- for (j = 0; j < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
- } else {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
+ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
- /* First detailed mode is preferred */
- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, newmode);
+ /* In 1.0, only timings are allowed */
+ if (!timing->pixel_clock && edid->version == 1 &&
+ edid->revision == 0)
+ continue;
- modes++;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks,
+ preferred);
}
return modes;
}
+
/**
* add_detailed_info_eedid - get detailed mode info from additional timing
* EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
static int add_detailed_info_eedid(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
+ int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- struct detailed_non_pixel *data;
- struct drm_display_mode *newmode;
int edid_ext_num;
int start_offset, end_offset;
int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
for (i = start_offset; i < end_offset;
i += sizeof(struct detailed_timing)) {
timing = (struct detailed_timing *)(edid_ext + i);
- data = &timing->data.other_data;
- /* Detailed mode timing */
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
-
- drm_mode_probed_add(connector, newmode);
-
- modes++;
- continue;
- }
-
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- /* Five modes per detailed section */
- for (j = 0; j < 5; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
}
return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
- int ret;
+ int i;
- ret = drm_do_probe_ddc_edid(adapter, buf, len);
- if (ret != 0) {
- goto end;
- }
- if (!edid_is_valid((struct edid *)buf)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- ret = -1;
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, buf, len))
+ return -1;
+ if (edid_is_valid((struct edid *)buf))
+ return 0;
}
-end:
- return ret;
+
+ /* repeated checksum failures; warn, but carry on */
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return -1;
}
/**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
ptr->vdisplay > vdisplay)
continue;
}
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
mode = drm_mode_duplicate(dev, ptr);
if (mode) {
drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 65ef011fa8b..1b49fa055f4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
mutex_unlock(&dev->mode_config.mutex);
}
}
- if (dpms_mode == DRM_MODE_DPMS_OFF) {
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ mutex_lock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
}
}
}
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
+ /* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_on(info);
break;
+ /* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
break;
+ /* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
+ /* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
break;
+ /* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
break;
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
+ ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
+ if (ret)
+ return ret;
+ if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
return -EINVAL;
+ }
} else {
drm_fb_helper_set_par(info);
}
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
drm_fb_helper_crtc_free(helper);
+ fb_dealloc_cmap(&helper->fb->fbdev->cmap);
}
EXPORT_SYMBOL(drm_fb_helper_free);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 251bc0e3b5e..08d14df3bb4 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ INIT_LIST_HEAD(&priv->event_list);
+ init_waitqueue_head(&priv->event_wait);
+ priv->event_space = 4096; /* set aside 4k for event buffer */
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
}
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, priv, true);
+ if (ret) {
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
} else {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
}
}
+static void drm_events_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ struct drm_pending_vblank_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Remove pending flips */
+ list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+ if (v->base.file_priv == file_priv) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
+ }
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+ e->destroy(e);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* Release file.
*
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master)
drm_master_release(dev, filp);
+ drm_events_release(file_priv);
+
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held by the minor */
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, true);
drm_master_put(&file_priv->minor->master);
}
}
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
-/** No-op. */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+ size_t total, size_t max, struct drm_pending_event **out)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ *out = NULL;
+ if (list_empty(&file_priv->event_list))
+ goto out;
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + total > max)
+ goto out;
+
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ *out = e;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_pending_event *e;
+ size_t total;
+ ssize_t ret;
+
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0)
+ return ret;
+
+ total = 0;
+ while (drm_dequeue_event(file_priv, total, count, &e)) {
+ if (copy_to_user(buffer + total,
+ e->event, e->event->length)) {
+ total = -EFAULT;
+ break;
+ }
+
+ total += e->event->length;
+ e->destroy(e);
+ }
+
+ return total;
+}
+EXPORT_SYMBOL(drm_read);
+
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
- return 0;
+ struct drm_file *file_priv = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &file_priv->event_wait, wait);
+
+ if (!list_empty(&file_priv->event_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
}
EXPORT_SYMBOL(drm_poll);
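Together with drm_read() above, this gives DRM fds ordinary poll()/read() semantics for events. A hypothetical userspace consumption loop — events are variable-length, so the buffer is walked by each event's self-reported length:

    struct pollfd p = { .fd = fd, .events = POLLIN };
    poll(&p, 1, -1);

    char buf[1024];
    ssize_t len = read(fd, buf, sizeof buf);
    ssize_t i = 0;
    while (i + (ssize_t)sizeof(struct drm_event) <= len) {
            struct drm_event *ev = (struct drm_event *)&buf[i];
            if (ev->type == DRM_EVENT_VBLANK)
                    ;   /* cast to struct drm_event_vblank and handle */
            i += ev->length;
    }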
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0a6f0b3bdc7..7998ee66b31 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
- !dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ if (!dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
+ ret = -EINVAL;
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ dev->vblank_enabled[crtc] = 0;
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
@@ -550,6 +568,63 @@ out:
return ret;
}
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ e->pipe = pipe;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ do_gettimeofday(&now);
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+ return -ENOMEM;
+ }
+
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count(dev, pipe);
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
+
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ } else {
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return 0;
+}
+
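The (seq - requested) <= (1 << 23) tests here and in drm_wait_vblank are wrap-safe comparisons on the 32-bit vblank counter; an illustration (not part of the patch):

    u32 seq = 2, requested = 0xfffffffeU;
    /* seq - requested == 4, which is <= (1 << 23), so the requested
     * sequence already passed even though the raw counter wrapped;
     * the event fires immediately instead of in ~2^32 vblanks. */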
/**
* Wait for VBLANK.
*
@@ -609,6 +684,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
+ if (flags & _DRM_VBLANK_EVENT)
+ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@ done:
return ret;
}
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ do_gettimeofday(&now);
+ seq = drm_vblank_count(dev, crtc);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ if ((seq - e->event.sequence) > (1<<23))
+ continue;
+
+ DRM_DEBUG("vblank event on %d, current %d\n",
+ e->event.sequence, seq);
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
@@ -651,7 +761,11 @@ done:
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ if (!dev->num_crtcs)
+ return;
+
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_handle_vblank_events(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 97dc5a4f0de..d7d7eac3ddd 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *align_splitoff = NULL;
+ unsigned tmp = 0;
+ unsigned wasted = 0;
+
+ if (node->start < start)
+ wasted += start - node->start;
+ if (alignment)
+ tmp = ((node->start + wasted) % alignment);
+
+ if (tmp)
+ wasted += alignment - tmp;
+ if (wasted) {
+ align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+ if (unlikely(align_splitoff == NULL))
+ return NULL;
+ }
+
+ if (node->size == size) {
+ list_del_init(&node->fl_entry);
+ node->free = 0;
+ } else {
+ node = drm_mm_split_at_start(node, size, atomic);
+ }
+
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free);
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match)
+{
+ struct list_head *list;
+ const struct list_head *free_stack = &mm->fl_entry;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+ unsigned wasted;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each(list, free_stack) {
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
+ wasted = 0;
+
+ if (entry->size < size)
+ continue;
+
+ if (entry->start > end || (entry->start+entry->size) < start)
+ continue;
+
+ if (entry->start < start)
+ wasted += start - entry->start;
+
+ if (alignment) {
+ register unsigned tmp = (entry->start + wasted) % alignment;
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+ if (entry->size >= size + wasted) {
+ if (!best_match)
+ return entry;
+			if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
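A hypothetical driver-side sketch of the new range-restricted allocator: placing a 64KiB, 4KiB-aligned buffer inside the first 16MiB of a drm_mm-managed aperture, using the same search-then-claim pattern as the existing helpers:

    struct drm_mm_node *node;

    node = drm_mm_search_free_in_range(mm, 64 * 1024, 4096,
                                       0, 16 * 1024 * 1024, 0);
    if (node)
            node = drm_mm_get_block_range_generic(node, 64 * 1024, 4096,
                                                  0, 16 * 1024 * 1024, 0);
    if (!node)
            return -ENOMEM;   /* no suitable hole in the requested window */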
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_takedown);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+ struct drm_mm_node *entry;
+ int total_used = 0, total_free = 0, total = 0;
+
+ list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ prefix, entry->start, entry->start + entry->size,
+ entry->size, entry->free ? "free" : "used");
+ total += entry->size;
+ if (entry->free)
+ total_free += entry->size;
+ else
+ total_used += entry->size;
+ }
+ printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
@@ -395,7 +503,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
else
total_used += entry->size;
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+ seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 51f677215f1..6d81a02463a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_height);
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+	if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
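A worked example: a 1920x1080@60 mode has clock = 148500 (kHz) and htotal = 2200, so (148500 * 1000) / 2200 = 67500 Hz; adding 500 and dividing by 1000 rounds this to 68 kHz.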
/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
* LOCKING:
* None.
*
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
*
* FIXME: why is this needed? shouldn't vrefresh be set already?
*
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 55bb8a82d61..ad73e141afd 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
kref_get(&master->refcount);
return master;
}
+EXPORT_SYMBOL(drm_master_get);
static void drm_master_destroy(struct kref *kref)
{
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
+EXPORT_SYMBOL(drm_master_put);
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret = 0;
+
if (file_priv->is_master)
return 0;
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
mutex_unlock(&dev->struct_mutex);
}
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
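The new master_set/master_drop hooks are opt-in per driver. A minimal sketch of wiring them up (hypothetical foo_ names; signatures inferred from the call sites above):

    static int foo_master_set(struct drm_device *dev,
                              struct drm_file *file_priv, bool from_open)
    {
            /* take over hardware state for the new master */
            return 0;
    }

    static void foo_master_drop(struct drm_device *dev,
                                struct drm_file *file_priv, bool from_release)
    {
            /* quiesce hardware when the master goes away */
    }

    static struct drm_driver foo_driver = {
            /* ... existing fields ... */
            .master_set  = foo_master_set,
            .master_drop = foo_master_drop,
    };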
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
+ spin_lock_init(&dev->event_lock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
new file mode 100644
index 00000000000..6d2abaf35ba
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+
+ch7006-y := ch7006_drv.o ch7006_mode.o
+obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
new file mode 100644
index 00000000000..9422a74c8b5
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+/* DRM encoder functions */
+
+static void ch7006_encoder_set_config(struct drm_encoder *encoder,
+ void *params)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ priv->params = params;
+}
+
+static void ch7006_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ drm_property_destroy(encoder->dev, priv->scale_property);
+
+ kfree(priv);
+ to_encoder_slave(encoder)->slave_priv = NULL;
+
+ drm_i2c_encoder_destroy(encoder);
+}
+
+static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+
+ ch7006_dbg(client, "\n");
+
+ if (mode == priv->last_dpms)
+ return;
+ priv->last_dpms = mode;
+
+ ch7006_setup_power_state(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POWER);
+}
+
+static void ch7006_encoder_save(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_state_save(client, &priv->saved_state);
+}
+
+static void ch7006_encoder_restore(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_state_load(client, &priv->saved_state);
+}
+
+static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ /* The ch7006 is painfully picky with the input timings so no
+ * custom modes for now... */
+
+ priv->mode = ch7006_lookup_mode(encoder, mode);
+
+ return !!priv->mode;
+}
+
+static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ if (ch7006_lookup_mode(encoder, mode))
+ return MODE_OK;
+ else
+ return MODE_BAD;
+}
+
+static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_encoder_params *params = priv->params;
+ struct ch7006_state *state = &priv->state;
+ uint8_t *regs = state->regs;
+ struct ch7006_mode *mode = priv->mode;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ int start_active;
+
+ ch7006_dbg(client, "\n");
+
+ regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
+ regs[CH7006_BWIDTH] = 0;
+ regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
+ params->input_format);
+
+ regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
+ | bitf(CH7006_CLKMODE_XCM, params->xcm)
+ | bitf(CH7006_CLKMODE_PCM, params->pcm);
+ if (params->clock_mode)
+ regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
+ if (params->clock_edge)
+ regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
+
+ start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
+ regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
+ regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
+
+ regs[CH7006_INPUT_SYNC] = 0;
+ if (params->sync_direction)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
+ if (params->sync_encoding)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
+ if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
+ if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
+
+ regs[CH7006_DETECT] = 0;
+ regs[CH7006_BCLKOUT] = 0;
+
+ regs[CH7006_SUBC_INC3] = 0;
+ if (params->pout_level)
+ regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
+
+ regs[CH7006_SUBC_INC4] = 0;
+ if (params->active_detect)
+ regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
+
+ regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
+
+ ch7006_setup_levels(encoder);
+ ch7006_setup_subcarrier(encoder);
+ ch7006_setup_pll(encoder);
+ ch7006_setup_power_state(encoder);
+ ch7006_setup_properties(encoder);
+
+ ch7006_state_load(client, state);
+}
+
+static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ int det;
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_save_reg(client, state, CH7006_DETECT);
+ ch7006_save_reg(client, state, CH7006_POWER);
+ ch7006_save_reg(client, state, CH7006_CLKMODE);
+
+ ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
+ bitfs(CH7006_POWER_LEVEL, NORMAL));
+ ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
+
+ ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
+
+ ch7006_write(client, CH7006_DETECT, 0);
+
+ det = ch7006_read(client, CH7006_DETECT);
+
+ ch7006_load_reg(client, state, CH7006_CLKMODE);
+ ch7006_load_reg(client, state, CH7006_POWER);
+ ch7006_load_reg(client, state, CH7006_DETECT);
+
+ if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+ CH7006_DETECT_SVIDEO_C_TEST|
+ CH7006_DETECT_CVBS_TEST)) == 0)
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+ else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+ CH7006_DETECT_SVIDEO_C_TEST)) == 0)
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+ else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+ else
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ drm_connector_property_set_value(connector,
+ encoder->dev->mode_config.tv_subconnector_property,
+ priv->subconnector);
+
+ return priv->subconnector ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_mode *mode;
+ int n = 0;
+
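+	/* Advertise only the modes usable with the current scale and
+	 * norm; valid_scales and valid_norms are bitmasks indexed by
+	 * scale and norm. */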
+ for (mode = ch7006_modes; mode->mode.clock; mode++) {
+ if (~mode->valid_scales & 1<<priv->scale ||
+ ~mode->valid_norms & 1<<priv->norm)
+ continue;
+
+ drm_mode_probed_add(connector,
+ drm_mode_duplicate(encoder->dev, &mode->mode));
+
+ n++;
+ }
+
+ return n;
+}
+
+static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_mode_config *conf = &dev->mode_config;
+
+ drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+
+ priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "scale", 2);
+ priv->scale_property->values[0] = 0;
+ priv->scale_property->values[1] = 2;
+
+ drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ priv->select_subconnector);
+ drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ priv->subconnector);
+ drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ priv->hmargin);
+ drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ priv->vmargin);
+ drm_connector_attach_property(connector, conf->tv_mode_property,
+ priv->norm);
+ drm_connector_attach_property(connector, conf->tv_brightness_property,
+ priv->brightness);
+ drm_connector_attach_property(connector, conf->tv_contrast_property,
+ priv->contrast);
+ drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ priv->flicker);
+ drm_connector_attach_property(connector, priv->scale_property,
+ priv->scale);
+
+ return 0;
+}
+
+static int ch7006_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ struct drm_mode_config *conf = &encoder->dev->mode_config;
+ struct drm_crtc *crtc = encoder->crtc;
+ bool modes_changed = false;
+
+ ch7006_dbg(client, "\n");
+
+ if (property == conf->tv_select_subconnector_property) {
+ priv->select_subconnector = val;
+
+ ch7006_setup_power_state(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POWER);
+
+ } else if (property == conf->tv_left_margin_property) {
+ priv->hmargin = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POV);
+ ch7006_load_reg(client, state, CH7006_HPOS);
+
+ } else if (property == conf->tv_bottom_margin_property) {
+ priv->vmargin = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POV);
+ ch7006_load_reg(client, state, CH7006_VPOS);
+
+ } else if (property == conf->tv_mode_property) {
+ if (connector->dpms != DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ priv->norm = val;
+
+ modes_changed = true;
+
+ } else if (property == conf->tv_brightness_property) {
+ priv->brightness = val;
+
+ ch7006_setup_levels(encoder);
+
+ ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+
+ } else if (property == conf->tv_contrast_property) {
+ priv->contrast = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_CONTRAST);
+
+ } else if (property == conf->tv_flicker_reduction_property) {
+ priv->flicker = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_FFILTER);
+
+ } else if (property == priv->scale_property) {
+ if (connector->dpms != DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ priv->scale = val;
+
+ modes_changed = true;
+
+ } else {
+ return -EINVAL;
+ }
+
+ if (modes_changed) {
+ drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+ /* Disable the crtc to ensure a full modeset is
+ * performed whenever it's turned on again. */
+ if (crtc) {
+ struct drm_mode_set modeset = {
+ .crtc = crtc,
+ };
+
+ crtc->funcs->set_config(&modeset);
+ }
+ }
+
+ return 0;
+}
+
+static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+ .set_config = ch7006_encoder_set_config,
+ .destroy = ch7006_encoder_destroy,
+ .dpms = ch7006_encoder_dpms,
+ .save = ch7006_encoder_save,
+ .restore = ch7006_encoder_restore,
+ .mode_fixup = ch7006_encoder_mode_fixup,
+ .mode_valid = ch7006_encoder_mode_valid,
+ .mode_set = ch7006_encoder_mode_set,
+ .detect = ch7006_encoder_detect,
+ .get_modes = ch7006_encoder_get_modes,
+ .create_resources = ch7006_encoder_create_resources,
+ .set_property = ch7006_encoder_set_property,
+};
+
+
+/* I2C driver functions */
+
+static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ uint8_t addr = CH7006_VERSION_ID;
+ uint8_t val;
+ int ret;
+
+ ch7006_dbg(client, "\n");
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ ch7006_info(client, "Detected version ID: %x\n", val);
+
+ return 0;
+
+fail:
+ ch7006_err(client, "Error %d reading version ID\n", ret);
+
+ return -ENODEV;
+}
+
+static int ch7006_remove(struct i2c_client *client)
+{
+ ch7006_dbg(client, "\n");
+
+ return 0;
+}
+
+static int ch7006_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct ch7006_priv *priv;
+ int i;
+
+ ch7006_dbg(client, "\n");
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ encoder->slave_priv = priv;
+ encoder->slave_funcs = &ch7006_encoder_funcs;
+
+ priv->norm = TV_NORM_PAL;
+ priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+ priv->scale = 1;
+ priv->contrast = 50;
+ priv->brightness = 50;
+ priv->flicker = 50;
+ priv->hmargin = 50;
+ priv->vmargin = 50;
+ priv->last_dpms = -1;
+
+ if (ch7006_tv_norm) {
+ for (i = 0; i < NUM_TV_NORMS; i++) {
+ if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
+ priv->norm = i;
+ break;
+ }
+ }
+
+ if (i == NUM_TV_NORMS)
+ ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
+ ch7006_tv_norm);
+ }
+
+ if (ch7006_scale >= 0 && ch7006_scale <= 2)
+ priv->scale = ch7006_scale;
+ else
+ ch7006_err(client, "Invalid scale setting \"%d\".\n",
+ ch7006_scale);
+
+ return 0;
+}
+
+static struct i2c_device_id ch7006_ids[] = {
+ { "ch7006", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+
+static struct drm_i2c_encoder_driver ch7006_driver = {
+ .i2c_driver = {
+ .probe = ch7006_probe,
+ .remove = ch7006_remove,
+
+ .driver = {
+ .name = "ch7006",
+ },
+
+ .id_table = ch7006_ids,
+ },
+
+ .encoder_init = ch7006_encoder_init,
+};
+
+
+/* Module initialization */
+
+static int __init ch7006_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
+}
+
+static void __exit ch7006_exit(void)
+{
+ drm_i2c_encoder_unregister(&ch7006_driver);
+}
+
+int ch7006_debug;
+module_param_named(debug, ch7006_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Enable debug output.");
+
+char *ch7006_tv_norm;
+module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+ "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
+ "\t\tDefault: PAL");
+
+int ch7006_scale = 1;
+module_param_named(scale, ch7006_scale, int, 0600);
+MODULE_PARM_DESC(scale, "Default scale.\n"
+ "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
+ "\t\t\t1 -> Select default video modes.\n"
+ "\t\t\t2 -> Select video modes with a lower blanking ratio.");
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(ch7006_init);
+module_exit(ch7006_exit);
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
new file mode 100644
index 00000000000..87f5445092e
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+char *ch7006_tv_norm_names[] = {
+ [TV_NORM_PAL] = "PAL",
+ [TV_NORM_PAL_M] = "PAL-M",
+ [TV_NORM_PAL_N] = "PAL-N",
+ [TV_NORM_PAL_NC] = "PAL-Nc",
+ [TV_NORM_PAL_60] = "PAL-60",
+ [TV_NORM_NTSC_M] = "NTSC-M",
+ [TV_NORM_NTSC_J] = "NTSC-J",
+};
+
+#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001, \
+ .vdisplay = 480, \
+ .vtotal = 525, \
+ .hvirtual = 660
+
+#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1, \
+ .vdisplay = 576, \
+ .vtotal = 625, \
+ .hvirtual = 810
+
+struct ch7006_tv_norm_info ch7006_tv_norms[] = {
+ [TV_NORM_NTSC_M] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.339 * fixed1,
+ .subc_freq = 3579545 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
+ .voffset = 0,
+ },
+ [TV_NORM_NTSC_J] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.286 * fixed1,
+ .subc_freq = 3579545 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL] = {
+ PAL_LIKE_TIMINGS,
+ .black_level = 0.3 * fixed1,
+ .subc_freq = 4433618.75 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL_M] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.339 * fixed1,
+ .subc_freq = 3575611.433 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+ .voffset = 16,
+ },
+
+	/* The following modes seem to work right, but they're
+	 * undocumented. */
+
+ [TV_NORM_PAL_N] = {
+ PAL_LIKE_TIMINGS,
+ .black_level = 0.339 * fixed1,
+ .subc_freq = 4433618.75 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL_NC] = {
+ PAL_LIKE_TIMINGS,
+ .black_level = 0.3 * fixed1,
+ .subc_freq = 3582056.25 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL_60] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.3 * fixed1,
+ .subc_freq = 4433618.75 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+ .voffset = 16,
+ },
+};
+
+#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
+ subc, scale, scale_mask, norm_mask, e_hd, e_vd) { \
+ .mode = { \
+ .name = #hd "x" #vd, \
+ .status = 0, \
+ .type = DRM_MODE_TYPE_DRIVER, \
+ .clock = f, \
+ .hdisplay = hd, \
+ .hsync_start = e_hd + 16, \
+ .hsync_end = e_hd + 80, \
+ .htotal = ht, \
+ .hskew = 0, \
+ .vdisplay = vd, \
+ .vsync_start = vd + 10, \
+ .vsync_end = vd + 26, \
+ .vtotal = vt, \
+ .vscan = 0, \
+ .flags = DRM_MODE_FLAG_##hsynp##HSYNC | \
+ DRM_MODE_FLAG_##vsynp##VSYNC, \
+ .vrefresh = 0, \
+ }, \
+ .enc_hdisp = e_hd, \
+ .enc_vdisp = e_vd, \
+ .subc_coeff = subc * fixed1, \
+ .dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
+ bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
+ .valid_scales = scale_mask, \
+ .valid_norms = norm_mask \
+ }
+
+#define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
+ subc, scale, scale_mask, norm_mask) \
+ __MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \
+ scale_mask, norm_mask, hd, vd)
+
+#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J | \
+ 1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
+
+#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
+
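+/*
+ * Each entry pairs an input mode (pixel clock in kHz, active size,
+ * totals, sync polarities) with the encoder setup it requires: the
+ * subcarrier coefficient, the scaling ratio and bitmasks of the
+ * scales and TV norms the entry is valid for.
+ */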
+struct ch7006_mode ch7006_modes[] = {
+ MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
+ MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
+ MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
+ MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
+ MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
+ MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
+ MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
+ MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
+ MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
+ MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
+ MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
+ MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
+ MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
+ MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
+ MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
+ MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
+ MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
+ MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
+ MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
+ __MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
+ MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
+ MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
+ MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
+ MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
+ MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
+ {}
+};
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_mode *mode;
+
+ for (mode = ch7006_modes; mode->mode.clock; mode++) {
+
+ if (~mode->valid_norms & 1<<priv->norm)
+ continue;
+
+ if (mode->mode.hdisplay != drm_mode->hdisplay ||
+ mode->mode.vdisplay != drm_mode->vdisplay ||
+ mode->mode.vtotal != drm_mode->vtotal ||
+ mode->mode.htotal != drm_mode->htotal ||
+ mode->mode.clock != drm_mode->clock)
+ continue;
+
+ return mode;
+ }
+
+ return NULL;
+}
+
+/* Some common HW state calculation code */
+
+void ch7006_setup_levels(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ uint8_t *regs = priv->state.regs;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ int gain;
+ int black_level;
+
+ /* Set DAC_GAIN if the voltage drop between white and black is
+ * high enough. */
+ if (norm->black_level < 339*fixed1/1000) {
+ gain = 76;
+
+ regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
+ } else {
+ gain = 71;
+
+ regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
+ }
+
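+	/* Convert the norm's black level (a 32.32 fixed-point fraction
+	 * of a volt) into register units; the constant and gain values
+	 * presumably come from the DAC output scale. */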
+ black_level = round_fixed(norm->black_level*26625)/gain;
+
+ /* Correct it with the specified brightness. */
+ black_level = interpolate(90, black_level, 208, priv->brightness);
+
+ regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);
+
+ ch7006_dbg(client, "black level: %d\n", black_level);
+}
+
+void ch7006_setup_subcarrier(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ struct ch7006_mode *mode = priv->mode;
+ uint32_t subc_inc;
+
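+	/* 32.32 fixed-point product of the mode's subcarrier
+	 * coefficient and the norm's subcarrier frequency;
+	 * pre-shifting the operands keeps the intermediate within 64
+	 * bits. The result is programmed as eight 4-bit nibbles. */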
+ subc_inc = round_fixed((mode->subc_coeff >> 8)
+ * (norm->subc_freq >> 24));
+
+ setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
+ setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
+ setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
+ setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
+ setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
+ setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
+ setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
+ setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);
+
+ ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
+}
+
+void ch7006_setup_pll(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ uint8_t *regs = priv->state.regs;
+ struct ch7006_mode *mode = priv->mode;
+ int n, best_n = 0;
+ int m, best_m = 0;
+ int freq, best_freq = 0;
+
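+	/* Exhaustive search for the (N, M) divider pair whose output
+	 * frequency CH7006_FREQ0*(n+2)/(m+2) best approximates the
+	 * requested pixel clock. */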
+ for (n = 0; n < CH7006_MAXN; n++) {
+ for (m = 0; m < CH7006_MAXM; m++) {
+ freq = CH7006_FREQ0*(n+2)/(m+2);
+
+ if (abs(freq - mode->mode.clock) <
+ abs(best_freq - mode->mode.clock)) {
+ best_freq = freq;
+ best_n = n;
+ best_m = m;
+ }
+ }
+ }
+
+ regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
+ bitf(CH7006_PLLOV_M_8, best_m);
+
+ regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
+ regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);
+
+ if (best_n < 108)
+ regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
+ else
+ regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;
+
+ ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
+ best_n, best_m, best_freq, best_n < 108);
+}
+
+void ch7006_setup_power_state(struct drm_encoder *encoder)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ uint8_t *power = &priv->state.regs[CH7006_POWER];
+ int subconnector;
+
+ subconnector = priv->select_subconnector ? priv->select_subconnector :
+ priv->subconnector;
+
+ *power = CH7006_POWER_RESET;
+
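+	/* Power up only the DACs feeding the selected (or detected)
+	 * output; with DPMS off, everything is powered down. */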
+ if (priv->last_dpms == DRM_MODE_DPMS_ON) {
+ switch (subconnector) {
+ case DRM_MODE_SUBCONNECTOR_SVIDEO:
+ *power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
+ break;
+ case DRM_MODE_SUBCONNECTOR_Composite:
+ *power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
+ break;
+ case DRM_MODE_SUBCONNECTOR_SCART:
+ *power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
+ CH7006_POWER_SCART;
+ break;
+ }
+
+ } else {
+ *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ }
+}
+
+void ch7006_setup_properties(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ struct ch7006_mode *ch_mode = priv->mode;
+ struct drm_display_mode *mode = &ch_mode->mode;
+ uint8_t *regs = state->regs;
+ int flicker, contrast, hpos, vpos;
+ uint64_t scale, aspect;
+
+ flicker = interpolate(0, 2, 3, priv->flicker);
+ regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
+ bitf(CH7006_FFILTER_LUMA, flicker) |
+ bitf(CH7006_FFILTER_CHROMA, 1);
+
+ contrast = interpolate(0, 5, 7, priv->contrast);
+ regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);
+
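+	/* The margin properties position the picture within the slack
+	 * between the norm's virtual resolution and the rescaled
+	 * input mode. */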
+ scale = norm->vtotal*fixed1;
+ do_div(scale, mode->vtotal);
+
+ aspect = ch_mode->enc_hdisp*fixed1;
+ do_div(aspect, ch_mode->enc_vdisp);
+
+ hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
+ * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;
+
+ setbitf(state, CH7006_POV, HPOS_8, hpos);
+ setbitf(state, CH7006_HPOS, 0, hpos);
+
+ vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
+ + norm->voffset) * priv->vmargin / 100 / 2;
+
+ setbitf(state, CH7006_POV, VPOS_8, vpos);
+ setbitf(state, CH7006_VPOS, 0, vpos);
+
+ ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
+}
+
+/* HW access functions */
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
+ ret, addr);
+}
+
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
+{
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
+ ret, addr);
+ return 0;
+}
+
+void ch7006_state_load(struct i2c_client *client,
+ struct ch7006_state *state)
+{
+ ch7006_load_reg(client, state, CH7006_POWER);
+
+ ch7006_load_reg(client, state, CH7006_DISPMODE);
+ ch7006_load_reg(client, state, CH7006_FFILTER);
+ ch7006_load_reg(client, state, CH7006_BWIDTH);
+ ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
+ ch7006_load_reg(client, state, CH7006_CLKMODE);
+ ch7006_load_reg(client, state, CH7006_START_ACTIVE);
+ ch7006_load_reg(client, state, CH7006_POV);
+ ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+ ch7006_load_reg(client, state, CH7006_HPOS);
+ ch7006_load_reg(client, state, CH7006_VPOS);
+ ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
+ ch7006_load_reg(client, state, CH7006_DETECT);
+ ch7006_load_reg(client, state, CH7006_CONTRAST);
+ ch7006_load_reg(client, state, CH7006_PLLOV);
+ ch7006_load_reg(client, state, CH7006_PLLM);
+ ch7006_load_reg(client, state, CH7006_PLLN);
+ ch7006_load_reg(client, state, CH7006_BCLKOUT);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC0);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC1);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC2);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC3);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC4);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC5);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC6);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC7);
+ ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
+ ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
+
+ /* I don't know what this is for, but otherwise I get no
+ * signal.
+ */
+ ch7006_write(client, 0x3d, 0x0);
+}
+
+void ch7006_state_save(struct i2c_client *client,
+ struct ch7006_state *state)
+{
+ ch7006_save_reg(client, state, CH7006_POWER);
+
+ ch7006_save_reg(client, state, CH7006_DISPMODE);
+ ch7006_save_reg(client, state, CH7006_FFILTER);
+ ch7006_save_reg(client, state, CH7006_BWIDTH);
+ ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
+ ch7006_save_reg(client, state, CH7006_CLKMODE);
+ ch7006_save_reg(client, state, CH7006_START_ACTIVE);
+ ch7006_save_reg(client, state, CH7006_POV);
+ ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
+ ch7006_save_reg(client, state, CH7006_HPOS);
+ ch7006_save_reg(client, state, CH7006_VPOS);
+ ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
+ ch7006_save_reg(client, state, CH7006_DETECT);
+ ch7006_save_reg(client, state, CH7006_CONTRAST);
+ ch7006_save_reg(client, state, CH7006_PLLOV);
+ ch7006_save_reg(client, state, CH7006_PLLM);
+ ch7006_save_reg(client, state, CH7006_PLLN);
+ ch7006_save_reg(client, state, CH7006_BCLKOUT);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC0);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC1);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC2);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC3);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC4);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC5);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC6);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC7);
+ ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
+ ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);
+
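+	/* The flicker filter register seems to read back with the
+	 * luma and chroma fields swapped; swap them back so that a
+	 * later state load restores the original value. */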
+ state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
+ (state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
+ (state->regs[CH7006_FFILTER] & 0x03) << 2;
+}
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
new file mode 100644
index 00000000000..b06d3d93d8a
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_PRIV_H__
+#define __DRM_I2C_CH7006_PRIV_H__
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "drm_encoder_slave.h"
+#include "i2c/ch7006.h"
+
+typedef int64_t fixed;
+#define fixed1 (1LL << 32)
+
+enum ch7006_tv_norm {
+ TV_NORM_PAL,
+ TV_NORM_PAL_M,
+ TV_NORM_PAL_N,
+ TV_NORM_PAL_NC,
+ TV_NORM_PAL_60,
+ TV_NORM_NTSC_M,
+ TV_NORM_NTSC_J,
+ NUM_TV_NORMS
+};
+
+struct ch7006_tv_norm_info {
+ fixed vrefresh;
+ int vdisplay;
+ int vtotal;
+ int hvirtual;
+
+ fixed subc_freq;
+ fixed black_level;
+
+ uint32_t dispmode;
+ int voffset;
+};
+
+struct ch7006_mode {
+ struct drm_display_mode mode;
+
+ int enc_hdisp;
+ int enc_vdisp;
+
+ fixed subc_coeff;
+ uint32_t dispmode;
+
+ uint32_t valid_scales;
+ uint32_t valid_norms;
+};
+
+struct ch7006_state {
+ uint8_t regs[0x26];
+};
+
+struct ch7006_priv {
+ struct ch7006_encoder_params *params;
+ struct ch7006_mode *mode;
+
+ struct ch7006_state state;
+ struct ch7006_state saved_state;
+
+ struct drm_property *scale_property;
+
+ int select_subconnector;
+ int subconnector;
+ int hmargin;
+ int vmargin;
+ enum ch7006_tv_norm norm;
+ int brightness;
+ int contrast;
+ int flicker;
+ int scale;
+
+ int last_dpms;
+};
+
+#define to_ch7006_priv(x) \
+ ((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
+
+extern int ch7006_debug;
+extern char *ch7006_tv_norm;
+extern int ch7006_scale;
+
+extern char *ch7006_tv_norm_names[];
+extern struct ch7006_tv_norm_info ch7006_tv_norms[];
+extern struct ch7006_mode ch7006_modes[];
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode);
+
+void ch7006_setup_levels(struct drm_encoder *encoder);
+void ch7006_setup_subcarrier(struct drm_encoder *encoder);
+void ch7006_setup_pll(struct drm_encoder *encoder);
+void ch7006_setup_power_state(struct drm_encoder *encoder);
+void ch7006_setup_properties(struct drm_encoder *encoder);
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val);
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr);
+
+void ch7006_state_load(struct i2c_client *client,
+ struct ch7006_state *state);
+void ch7006_state_save(struct i2c_client *client,
+ struct ch7006_state *state);
+
+/* Some helper macros */
+
+#define ch7006_dbg(client, format, ...) do { \
+ if (ch7006_debug) \
+ dev_printk(KERN_DEBUG, &client->dev, \
+ "%s: " format, __func__, ## __VA_ARGS__); \
+ } while (0)
+#define ch7006_info(client, format, ...) \
+ dev_info(&client->dev, format, __VA_ARGS__)
+#define ch7006_err(client, format, ...) \
+ dev_err(&client->dev, format, __VA_ARGS__)
+
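+/*
+ * Register bitfields are described as "source shift, high:low". As
+ * "high:low" is the body of a conditional expression, (1 ? bitfield)
+ * yields the high bit index and (0 ? bitfield) the low one.
+ */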
+#define __mask(src, bitfield) \
+ (((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
+#define mask(bitfield) __mask(bitfield)
+
+#define __bitf(src, bitfield, x) \
+ (((x) >> (src) << (0 ? bitfield)) & __mask(src, bitfield))
+#define bitf(bitfield, x) __bitf(bitfield, x)
+#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s)
+#define setbitf(state, reg, bitfield, x) \
+ state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield)) \
+ | bitf(reg##_##bitfield, x)
+
+#define __unbitf(src, bitfield, x) \
+ ((x & __mask(src, bitfield)) >> (0 ? bitfield) << (src))
+#define unbitf(bitfield, x) __unbitf(bitfield, x)
+
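+/* Map a 0-100 property value onto [y0, y2], with y1 at the midpoint. */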
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+ return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+static inline int32_t round_fixed(fixed x)
+{
+ return (x + fixed1/2) >> 32;
+}
+
+#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg])
+#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg)
+
+/* Fixed hardware specs */
+
+#define CH7006_FREQ0 14318
+#define CH7006_MAXN 650
+#define CH7006_MAXM 315
+
+/* Register definitions */
+
+#define CH7006_DISPMODE 0x00
+#define CH7006_DISPMODE_INPUT_RES 0, 7:5
+#define CH7006_DISPMODE_INPUT_RES_512x384 0x0
+#define CH7006_DISPMODE_INPUT_RES_720x400 0x1
+#define CH7006_DISPMODE_INPUT_RES_640x400 0x2
+#define CH7006_DISPMODE_INPUT_RES_640x480 0x3
+#define CH7006_DISPMODE_INPUT_RES_800x600 0x4
+#define CH7006_DISPMODE_INPUT_RES_NATIVE 0x5
+#define CH7006_DISPMODE_OUTPUT_STD 0, 4:3
+#define CH7006_DISPMODE_OUTPUT_STD_PAL 0x0
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC 0x1
+#define CH7006_DISPMODE_OUTPUT_STD_PAL_M 0x2
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J 0x3
+#define CH7006_DISPMODE_SCALING_RATIO 0, 2:0
+#define CH7006_DISPMODE_SCALING_RATIO_5_4 0x0
+#define CH7006_DISPMODE_SCALING_RATIO_1_1 0x1
+#define CH7006_DISPMODE_SCALING_RATIO_7_8 0x2
+#define CH7006_DISPMODE_SCALING_RATIO_5_6 0x3
+#define CH7006_DISPMODE_SCALING_RATIO_3_4 0x4
+#define CH7006_DISPMODE_SCALING_RATIO_7_10 0x5
+
+#define CH7006_FFILTER 0x01
+#define CH7006_FFILTER_TEXT 0, 5:4
+#define CH7006_FFILTER_LUMA 0, 3:2
+#define CH7006_FFILTER_CHROMA 0, 1:0
+#define CH7006_FFILTER_CHROMA_NO_DCRAWL 0x3
+
+#define CH7006_BWIDTH 0x03
+#define CH7006_BWIDTH_5L_FFILER (1 << 7)
+#define CH7006_BWIDTH_CVBS_NO_CHROMA (1 << 6)
+#define CH7006_BWIDTH_CHROMA 0, 5:4
+#define CH7006_BWIDTH_SVIDEO_YPEAK (1 << 3)
+#define CH7006_BWIDTH_SVIDEO_LUMA 0, 2:1
+#define CH7006_BWIDTH_CVBS_LUMA 0, 0:0
+
+#define CH7006_INPUT_FORMAT 0x04
+#define CH7006_INPUT_FORMAT_DAC_GAIN (1 << 6)
+#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH (1 << 5)
+#define CH7006_INPUT_FORMAT_FORMAT 0, 3:0
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16 0x0
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16 0x1
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16 0x2
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15 0x3
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C 0x4
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I 0x5
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8 0x6
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8 0x7
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8 0x8
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8 0x9
+
+#define CH7006_CLKMODE 0x06
+#define CH7006_CLKMODE_SUBC_LOCK (1 << 7)
+#define CH7006_CLKMODE_MASTER (1 << 6)
+#define CH7006_CLKMODE_POS_EDGE (1 << 4)
+#define CH7006_CLKMODE_XCM 0, 3:2
+#define CH7006_CLKMODE_PCM 0, 1:0
+
+#define CH7006_START_ACTIVE 0x07
+#define CH7006_START_ACTIVE_0 0, 7:0
+
+#define CH7006_POV 0x08
+#define CH7006_POV_START_ACTIVE_8 8, 2:2
+#define CH7006_POV_HPOS_8 8, 1:1
+#define CH7006_POV_VPOS_8 8, 0:0
+
+#define CH7006_BLACK_LEVEL 0x09
+#define CH7006_BLACK_LEVEL_0 0, 7:0
+
+#define CH7006_HPOS 0x0a
+#define CH7006_HPOS_0 0, 7:0
+
+#define CH7006_VPOS 0x0b
+#define CH7006_VPOS_0 0, 7:0
+
+#define CH7006_INPUT_SYNC 0x0d
+#define CH7006_INPUT_SYNC_EMBEDDED (1 << 3)
+#define CH7006_INPUT_SYNC_OUTPUT (1 << 2)
+#define CH7006_INPUT_SYNC_PVSYNC (1 << 1)
+#define CH7006_INPUT_SYNC_PHSYNC (1 << 0)
+
+#define CH7006_POWER 0x0e
+#define CH7006_POWER_SCART (1 << 4)
+#define CH7006_POWER_RESET (1 << 3)
+#define CH7006_POWER_LEVEL 0, 2:0
+#define CH7006_POWER_LEVEL_CVBS_OFF 0x0
+#define CH7006_POWER_LEVEL_POWER_OFF 0x1
+#define CH7006_POWER_LEVEL_SVIDEO_OFF 0x2
+#define CH7006_POWER_LEVEL_NORMAL 0x3
+#define CH7006_POWER_LEVEL_FULL_POWER_OFF 0x4
+
+#define CH7006_DETECT 0x10
+#define CH7006_DETECT_SVIDEO_Y_TEST (1 << 3)
+#define CH7006_DETECT_SVIDEO_C_TEST (1 << 2)
+#define CH7006_DETECT_CVBS_TEST (1 << 1)
+#define CH7006_DETECT_SENSE (1 << 0)
+
+#define CH7006_CONTRAST 0x11
+#define CH7006_CONTRAST_0 0, 2:0
+
+#define CH7006_PLLOV 0x13
+#define CH7006_PLLOV_N_8 8, 2:1
+#define CH7006_PLLOV_M_8 8, 0:0
+
+#define CH7006_PLLM 0x14
+#define CH7006_PLLM_0 0, 7:0
+
+#define CH7006_PLLN 0x15
+#define CH7006_PLLN_0 0, 7:0
+
+#define CH7006_BCLKOUT 0x17
+
+#define CH7006_SUBC_INC0 0x18
+#define CH7006_SUBC_INC0_28 28, 3:0
+
+#define CH7006_SUBC_INC1 0x19
+#define CH7006_SUBC_INC1_24 24, 3:0
+
+#define CH7006_SUBC_INC2 0x1a
+#define CH7006_SUBC_INC2_20 20, 3:0
+
+#define CH7006_SUBC_INC3 0x1b
+#define CH7006_SUBC_INC3_GPIO1_VAL (1 << 7)
+#define CH7006_SUBC_INC3_GPIO0_VAL (1 << 6)
+#define CH7006_SUBC_INC3_POUT_3_3V (1 << 5)
+#define CH7006_SUBC_INC3_POUT_INV (1 << 4)
+#define CH7006_SUBC_INC3_16 16, 3:0
+
+#define CH7006_SUBC_INC4 0x1c
+#define CH7006_SUBC_INC4_GPIO1_IN (1 << 7)
+#define CH7006_SUBC_INC4_GPIO0_IN (1 << 6)
+#define CH7006_SUBC_INC4_DS_INPUT (1 << 4)
+#define CH7006_SUBC_INC4_12 12, 3:0
+
+#define CH7006_SUBC_INC5 0x1d
+#define CH7006_SUBC_INC5_8 8, 3:0
+
+#define CH7006_SUBC_INC6 0x1e
+#define CH7006_SUBC_INC6_4 4, 3:0
+
+#define CH7006_SUBC_INC7 0x1f
+#define CH7006_SUBC_INC7_0 0, 3:0
+
+#define CH7006_PLL_CONTROL 0x20
+#define CH7006_PLL_CONTROL_CPI (1 << 5)
+#define CH7006_PLL_CONTROL_CAPACITOR (1 << 4)
+#define CH7006_PLL_CONTROL_7STAGES (1 << 3)
+#define CH7006_PLL_CONTROL_DIGITAL_5V (1 << 2)
+#define CH7006_PLL_CONTROL_ANALOG_5V (1 << 1)
+#define CH7006_PLL_CONTROL_MEMORY_5V (1 << 0)
+
+#define CH7006_CALC_SUBC_INC0 0x21
+#define CH7006_CALC_SUBC_INC0_24 24, 4:3
+#define CH7006_CALC_SUBC_INC0_HYST 0, 2:1
+#define CH7006_CALC_SUBC_INC0_AUTO (1 << 0)
+
+#define CH7006_CALC_SUBC_INC1 0x22
+#define CH7006_CALC_SUBC_INC1_16 16, 7:0
+
+#define CH7006_CALC_SUBC_INC2 0x23
+#define CH7006_CALC_SUBC_INC2_8 8, 7:0
+
+#define CH7006_CALC_SUBC_INC3 0x24
+#define CH7006_CALC_SUBC_INC3_0 0, 7:0
+
+#define CH7006_VERSION_ID 0x25
+
+#endif
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be096b..9929f84ec3e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_lvds.o \
intel_bios.o \
intel_dp.o \
- intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
+ intel_overlay.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531d..1184c14ba87 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (val != CH7017_DEVICE_ID_VALUE &&
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
- DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+ "Slave %d.\n",
val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
uint8_t horizontal_active_pixel_output, vertical_active_line_output;
uint8_t active_input_line_output;
- DRM_DEBUG("Registers before mode setting\n");
+ DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* Turn the LVDS back on with new settings. */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
- DRM_DEBUG("Registers after mode setting\n");
+ DRM_DEBUG_KMS("Registers after mode setting\n");
ch7017_dump_regs(dvo);
}
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
#define DUMP(reg) \
do { \
ch7017_read(dvo, reg, &val); \
- DRM_DEBUG(#reg ": %02x\n", val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
} while (0)
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b89628968..d56ff5cc22b 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!ch7xxx->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!ch7xxx->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
- DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
goto out;
if (device != CH7xxx_DID) {
- DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
ch7xxx->quiet = false;
- DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
name, vendor, device);
return true;
out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
if ((i % 8) == 0 )
- DRM_DEBUG("\n %02X: ", i);
- DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+ DRM_LOG_KMS("\n %02X: ", i);
+ DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
}
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921f..24169e528f0 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
};
if (!priv->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
return true;
if (!priv->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
* the address it's responding on.
*/
if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
- DRM_DEBUG("ivch detect failed due to address mismatch "
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
(temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
uint16_t val;
ivch_read(dvo, VR00, &val);
- DRM_DEBUG("VR00: 0x%04x\n", val);
+ DRM_LOG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
- DRM_DEBUG("VR01: 0x%04x\n", val);
+ DRM_LOG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
- DRM_DEBUG("VR30: 0x%04x\n", val);
+ DRM_LOG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
- DRM_DEBUG("VR40: 0x%04x\n", val);
+ DRM_LOG_KMS("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
- DRM_DEBUG("VR80: 0x%04x\n", val);
+ DRM_LOG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
- DRM_DEBUG("VR81: 0x%04x\n", val);
+ DRM_LOG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
- DRM_DEBUG("VR82: 0x%04x\n", val);
+ DRM_LOG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
- DRM_DEBUG("VR83: 0x%04x\n", val);
+ DRM_LOG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
- DRM_DEBUG("VR84: 0x%04x\n", val);
+ DRM_LOG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
- DRM_DEBUG("VR85: 0x%04x\n", val);
+ DRM_LOG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
- DRM_DEBUG("VR86: 0x%04x\n", val);
+ DRM_LOG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
- DRM_DEBUG("VR87: 0x%04x\n", val);
+ DRM_LOG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
- DRM_DEBUG("VR88: 0x%04x\n", val);
+ DRM_LOG_KMS("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
- DRM_DEBUG("VR8E: 0x%04x\n", val);
+ DRM_LOG_KMS("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
- DRM_DEBUG("VR8F: 0x%04x\n", val);
+ DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5..0001c13f0a8 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!sil->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!sil->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_VID & 0xff)) {
- DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_DID & 0xff)) {
- DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
- DRM_DEBUG("init sil164 dvo controller successfully!\n");
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
return true;
out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
- DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
- DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
- DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
- DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
- DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384e..c7c391bc116 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!tfp->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!tfp->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
- DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+ "Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
- DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+ "Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
- DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
- DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
- DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
- DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
- DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
- DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
- DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
- DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
- DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
- DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
- DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
- DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3c..18476bf0b58 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@
*/
#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_gem_object *obj = obj_priv->obj;
- seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
+ seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
obj,
get_pin_flag(obj_priv),
obj->size,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno,
- obj_priv->dirty ? "dirty" : "");
+ obj_priv->dirty ? " dirty" : "",
+ obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -412,6 +414,109 @@ static int i915_registers_info(struct seq_file *m, void *data) {
return 0;
}
+static int
+i915_wedged_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof (buf),
+ "wedged : %d\n",
+ atomic_read(&dev_priv->mm.wedged));
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_INFO("Manually setting wedged to %d\n", val);
+
+ atomic_set(&dev_priv->mm.wedged, val);
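+
+	/* Wake any waiters and kick the error work so the simulated
+	 * hang is handled like a real one. */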
+ if (val) {
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ queue_work(dev_priv->wq, &dev_priv->error_work);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_wedged_open,
+ .read = i915_wedged_read,
+ .write = i915_wedged_write,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+ list_add(&node->list, &minor->debugfs_nodes.list);
+
+ return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_wedged",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_wedged_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
static struct drm_info_list i915_debugfs_list[] = {
{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@ static struct drm_info_list i915_debugfs_list[] = {
int i915_debugfs_init(struct drm_minor *minor)
{
+ int ret;
+
+ ret = i915_wedged_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -441,7 +552,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
-
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45f..701bfeac7f5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
- DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
phys =(entry & PTE_ADDRESS_MASK) |
((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
- DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
return phys;
}
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
- if (!IS_IGD(dev))
+ if (!IS_PINEVIEW(dev))
return;
tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_iomapfree;
- dev_priv->wq = create_workqueue("i915");
+ dev_priv->wq = create_singlethread_workqueue("i915");
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- /* FIXME: verify on IGDNG */
- if (!IS_IGDNG(dev))
- intel_opregion_init(dev, 0);
+ intel_opregion_init(dev, 0);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/*
+		 * Free the memory allocated for the child device
+		 * configs parsed from the VBT.
+		 */
+ if (dev_priv->child_dev && dev_priv->child_dev_num) {
+ kfree(dev_priv->child_dev);
+ dev_priv->child_dev = NULL;
+ dev_priv->child_dev_num = 0;
+ }
drm_irq_uninstall(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
- if (!IS_IGDNG(dev))
- intel_opregion_free(dev, 0);
+ intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
+
+ intel_cleanup_overlay(dev);
}
pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec075f..2fa21786205 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -333,6 +333,7 @@ static struct drm_driver driver = {
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f659119..fbecac72f5b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
+struct intel_overlay;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -187,6 +189,7 @@ typedef struct drm_i915_private {
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -206,11 +209,13 @@ typedef struct drm_i915_private {
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on IGDNG,
+ /** split irq regs for graphics and display engine on Ironlake,
irq_mask_reg is still used for display irq. */
u32 gt_irq_mask_reg;
u32 gt_irq_enable_reg;
u32 de_irq_enable_reg;
+ u32 pch_irq_mask_reg;
+ u32 pch_irq_enable_reg;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -240,6 +245,9 @@ typedef struct drm_i915_private {
struct intel_opregion opregion;
+ /* overlay */
+ struct intel_overlay *overlay;
+
/* LVDS info */
int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither;
@@ -258,7 +266,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@ typedef struct drm_i915_private {
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveRENDERSTANDBY;
+ u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -374,8 +383,6 @@ typedef struct drm_i915_private {
u32 saveFDI_RXA_IMR;
u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
- u32 saveD_STATE;
- u32 saveDSPCLK_GATE_D;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
@@ -539,13 +546,21 @@ typedef struct drm_i915_private {
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
+ struct drm_crtc *plane_to_crtc_mapping[2];
+ struct drm_crtc *pipe_to_crtc_mapping[2];
+ wait_queue_head_t pending_flip_queue;
+
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
+ /* indicates the reduced downclock for LVDS*/
+ int lvds_downclock;
struct work_struct idle_work;
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
+ int child_dev_num;
+ struct child_device_config *child_dev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@ struct drm_i915_gem_object {
* Advice: are the backing pages purgeable?
*/
int madv;
+
+ /**
+ * Number of crtcs where this object is currently the fb, but
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+ atomic_t pending_flip;
};
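Editor's note: the new pending_flip/pending_flip_queue pairing (used by intel_prepare_page_flip/intel_finish_page_flip and the execbuffer wait further down) is a plain counted-waiter scheme. A rough userspace analogue of the shape, with pthreads standing in for the kernel's wait queue — all names here are illustrative, not from the driver:

#include <pthread.h>
#include <stdatomic.h>

/* Illustrative analogues of obj->pending_flip and
 * dev_priv->pending_flip_queue. */
static atomic_int pending_flip;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flip_done = PTHREAD_COND_INITIALIZER;

/* Flip submission: the buffer is now owed one vblank completion. */
static void queue_flip(void)
{
	atomic_fetch_add(&pending_flip, 1);
}

/* Vblank handler: drop the count, wake waiters when it hits zero. */
static void finish_flip(void)
{
	if (atomic_fetch_sub(&pending_flip, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&flip_done);
		pthread_mutex_unlock(&lock);
	}
}

/* Execbuffer side: sleep until no flips are outstanding. */
static void wait_for_pending_flips(void)
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&pending_flip) > 0)
		pthread_cond_wait(&flip_done, &lock);
	pthread_mutex_unlock(&lock);
}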
/**
@@ -738,6 +760,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void intel_enable_asle(struct drm_device *dev);
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
@@ -824,6 +851,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@ extern int i915_restore_state(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev, int resume);
extern void intel_opregion_free(struct drm_device *dev, int suspend);
extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
#else
static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif
@@ -955,8 +985,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2 || \
- (IS_IGD(dev)))
+ (IS_PINEVIEW(dev)))
-#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IGDNG(dev))
+ IS_IRONLAKE(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_IGD(dev) || IS_IGDNG_M(dev))
+ IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IGDNG(dev))
+ IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
(IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_IGD(dev) && \
- !IS_IGDNG(dev))
+ !IS_PINEVIEW(dev) && \
+ !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index abfc27b0c2e..8c463cf2050 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
DRM_ERROR("failed to add to map hash\n");
+ ret = -ENOMEM;
goto out_free_mm;
}
@@ -1309,7 +1310,7 @@ out_free_list:
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
*
- * Preserve the reservation of the mmaping with the DRM core code, but
+ * Preserve the reservation of the mmapping with the DRM core code, but
* relinquish ownership of the pages back to the system.
*
* It is vital that we remove the page mapping if we have mapped a tiled
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
*
* Returned sequence numbers are nonzero on success.
*/
-static uint32_t
+uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t flush_domains)
{
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
- DRM_DEBUG("%d\n", seqno);
+ DRM_DEBUG_DRIVER("%d\n", seqno);
request->seqno = seqno;
request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
return -EIO;
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ if (interruptible)
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
+
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
return ret;
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ return i915_do_wait_request(dev, seqno, 1);
+}
+
static void
i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
#endif
BEGIN_LP_RING(2);
OUT_RING(cmd);
- OUT_RING(0); /* noop */
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ i915_gem_object_flush_gtt_write_domain(obj);
+ break;
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_object_flush_cpu_write_domain(obj);
+ break;
+ default:
+ i915_gem_object_flush_gpu_write_domain(obj);
+ break;
+ }
+}
+
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
return 0;
}
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+ struct drm_gem_object **object_list,
+ int count)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ DEFINE_WAIT(wait);
+ int i, ret = 0;
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->pending_flip_queue,
+ &wait, TASK_INTERRUPTIBLE);
+ for (i = 0; i < count; i++) {
+ obj_priv = object_list[i]->driver_private;
+ if (atomic_read(&obj_priv->pending_flip) > 0)
+ break;
+ }
+ if (i == count)
+ break;
+
+ if (!signal_pending(current)) {
+ mutex_unlock(&dev->struct_mutex);
+ schedule();
+ mutex_lock(&dev->struct_mutex);
+ continue;
+ }
+ ret = -ERESTARTSYS;
+ break;
+ }
+ finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+ return ret;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
- int pin_tries;
+ int pin_tries, flips;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
/* Copy in the exec list from userland */
- exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
- object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
if (exec_list == NULL || object_list == NULL) {
DRM_ERROR("Failed to allocate exec or object list "
"for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
if (atomic_read(&dev_priv->mm.wedged)) {
- DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
goto pre_mutex_err;
}
if (dev_priv->mm.suspended) {
- DRM_ERROR("Execbuf while VT-switched.\n");
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
/* Look up object handles */
+ flips = 0;
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
goto err;
}
obj_priv->in_execbuffer = true;
+ flips += atomic_read(&obj_priv->pending_flip);
+ }
+
+ if (flips > 0) {
+ ret = i915_gem_wait_for_pending_flip(dev, object_list,
+ args->buffer_count);
+ if (ret)
+ goto err;
}
/* Pin and relocate */
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+ DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
}
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
}
-
i915_gem_detect_bit_6_swizzle(dev);
+ init_waitqueue_head(&dev_priv->pending_flip_queue);
}
/*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
- DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+ DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
ret = copy_from_user(obj_addr, user_data, args->size);
if (ret)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398453c..30d6af6c09b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
- DRM_DEBUG("failed bus alloc: %d\n", ret);
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
goto out;
}
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
bool need_disable;
- if (IS_IGDNG(dev)) {
- /* On IGDNG whatever DRAM config, GPU always do
+ if (IS_IRONLAKE(dev)) {
+ /* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82aa6e..85f4c5de97e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,10 +43,13 @@
* we leave them always unmasked in IMR and then control enabling them through
* PIPESTAT alone.
*/
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX \
+ (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
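Editor's note: the comment above these macros explains the two-level scheme — pipe-event sources stay permanently unmasked in IMR, and the per-event enables in PIPESTAT decide what actually interrupts. A toy model of that gating (the bit positions are hypothetical stand-ins, not the real register layout):

#include <stdint.h>
#include <stdio.h>

#define PIPE_EVENT_IMR_BIT (1u << 6)  /* stand-in for a pipe-event IMR source */
#define VBLANK_ENABLE_BIT  (1u << 17) /* stand-in for a PIPESTAT enable bit */
#define VBLANK_STATUS_BIT  (1u << 1)  /* stand-in for a PIPESTAT status bit */

static int irq_fires(uint32_t imr, uint32_t pipestat)
{
	if (imr & PIPE_EVENT_IMR_BIT)            /* masked at the top level */
		return 0;
	return (pipestat & VBLANK_ENABLE_BIT) && /* enabled per event... */
	       (pipestat & VBLANK_STATUS_BIT);   /* ...and actually pending */
}

int main(void)
{
	printf("%d\n", irq_fires(0, VBLANK_ENABLE_BIT | VBLANK_STATUS_BIT)); /* 1 */
	printf("%d\n", irq_fires(0, VBLANK_STATUS_BIT));                     /* 0 */
	return 0;
}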
@@ -61,7 +64,7 @@
DRM_I915_VBLANK_PIPE_B)
void
-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static inline void
-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
/* For display hotplug interrupt */
void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
dev_priv->irq_mask_reg |= mask;
@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
}
/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, DE_GSE);
+ else
+ i915_enable_pipestat(dev_priv, 1,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+}
+
+/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
return 0;
}
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
return 0;
}
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_sysfs_hotplug_event(dev);
}
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
+irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier;
- u32 new_de_iir, new_gt_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 new_de_iir, new_gt_iir, new_pch_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
+ pch_iir = I915_READ(SDEIIR);
for (;;) {
- if (de_iir == 0 && gt_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
break;
ret = IRQ_HANDLED;
+ /* must clear the PCH hotplug event before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ new_pch_iir = I915_READ(SDEIIR);
+
I915_WRITE(DEIIR, de_iir);
new_de_iir = I915_READ(DEIIR);
I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
DRM_WAKEUP(&dev_priv->irq_queue);
}
+ if (de_iir & DE_GSE)
+ ironlake_opregion_gse_intr(dev);
+
+ /* check event from PCH */
+ if ((de_iir & DE_PCH_EVENT) &&
+ (pch_iir & SDE_HOTPLUG_MASK)) {
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ }
+
de_iir = new_de_iir;
gt_iir = new_gt_iir;
+ pch_iir = new_pch_iir;
}
I915_WRITE(DEIER, de_ier);
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
- DRM_DEBUG("generating error event\n");
+ DRM_DEBUG_DRIVER("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
if (IS_I965G(dev)) {
- DRM_DEBUG("resetting chip\n");
+ DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
if (!i965_reset(dev, GDRST_RENDER)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
} else {
- printk("reboot required\n");
+ DRM_DEBUG_DRIVER("reboot required\n");
}
}
}
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
- DRM_DEBUG("out ot memory, not capturing error state\n");
+ DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
goto out;
}
@@ -512,7 +546,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- printk("i915: Waking up sleeping processes\n");
DRM_WAKEUP(&dev_priv->irq_queue);
}
@@ -535,8 +568,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
- if (IS_IGDNG(dev))
- return igdng_irq_handler(dev);
+ if (IS_IRONLAKE(dev))
+ return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
@@ -568,14 +601,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
*/
if (pipea_stats & 0x8000ffff) {
if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG("pipe a underrun\n");
+ DRM_DEBUG_DRIVER("pipe a underrun\n");
I915_WRITE(PIPEASTAT, pipea_stats);
irq_received = 1;
}
if (pipeb_stats & 0x8000ffff) {
if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG("pipe b underrun\n");
+ DRM_DEBUG_DRIVER("pipe b underrun\n");
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
@@ -591,7 +624,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- DRM_DEBUG("hotplug event received, stat 0x%08x\n",
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
-
- /* EOS interrupts occurs */
- if (IS_IGD(dev) &&
- (hotplug_status & CRT_EOS_INT_STATUS)) {
- u32 temp;
-
- DRM_DEBUG("EOS interrupt occurs\n");
- /* status is already cleared */
- temp = I915_READ(ADPA);
- temp &= ~ADPA_DAC_ENABLE;
- I915_WRITE(ADPA, temp);
-
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp &= ~CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
-
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- }
}
I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
+ intel_finish_page_flip(dev, 0);
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
+ intel_finish_page_flip(dev, 1);
}
if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -684,7 +704,7 @@ static int i915_emit_irq(struct drm_device * dev)
i915_kernel_lost_context(dev);
- DRM_DEBUG("\n");
+ DRM_DEBUG_DRIVER("\n");
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IGDNG(dev))
- igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -725,8 +745,8 @@ void i915_user_irq_put(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IGDNG(dev))
- igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -749,7 +769,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return 0;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@ void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -976,7 +996,7 @@ void i915_hangcheck_elapsed(unsigned long data)
/* drm_dma.h hooks
*/
-static void igdng_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -992,14 +1012,21 @@ static void igdng_irq_preinstall(struct drm_device *dev)
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
(void) I915_READ(GTIER);
+
+ /* south display irq */
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ (void) I915_READ(SDEIER);
}
-static int igdng_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
u32 render_mask = GT_USER_INTERRUPT;
+ u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
+ dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+ dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
+ I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+ (void) I915_READ(SDEIER);
+
return 0;
}
@@ -1031,8 +1066,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_IGDNG(dev)) {
- igdng_irq_preinstall(dev);
+ if (IS_IRONLAKE(dev)) {
+ ironlake_irq_preinstall(dev);
return;
}
@@ -1059,8 +1094,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- if (IS_IGDNG(dev))
- return igdng_irq_postinstall(dev);
+ if (IS_IRONLAKE(dev))
+ return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1120,7 +1155,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void igdng_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
- if (IS_IGDNG(dev)) {
- igdng_irq_uninstall(dev);
+ if (IS_IRONLAKE(dev)) {
+ ironlake_irq_uninstall(dev);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d5193556d3..7cc8410239c 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@ struct opregion_asle {
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
else {
- if (IS_IGD(dev)) {
+ if (IS_PINEVIEW(dev)) {
blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
asle_req = asle->aslc & ASLE_REQ_MSK;
if (!asle_req) {
- DRM_DEBUG("non asle set request??\n");
+ DRM_DEBUG_DRIVER("non asle set request??\n");
return;
}
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
+static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 cpu_pwm_ctl, pch_pwm_ctl2;
+ u32 max_backlight, level;
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
+ pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ /* get the max PWM frequency */
+ max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
+ /* calculate the expected PWM frequency */
+ level = (bclp * max_backlight) / 255;
+ /* preserve the high 16 bits */
+ cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
+ /* write the updated PWM frequency */
+ I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
+
+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
+void ironlake_opregion_gse_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG_DRIVER("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM) {
+ DRM_DEBUG_DRIVER("Illum is not supported\n");
+ asle_stat |= ASLE_ALS_ILLUM_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT) {
+ DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ asle_stat |= ASLE_PFIT_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_PWM_FREQ) {
+ DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ asle_stat |= ASLE_PWM_FREQ_FAILED;
+ }
+
+ asle->aslc = asle_stat;
+}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
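Editor's note: asle_set_backlight_ironlake above is plain linear scaling — the 0-255 ASLE request is mapped onto the PWM range read from the PCH register, and a percentage is reported back in cblv. A standalone check of that arithmetic (the register value here is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bclp = 128;             /* ASLE brightness request, 0..255 */
	uint32_t max_backlight = 0x1000; /* hypothetical PWM period from the PCH reg */

	uint32_t level = (bclp * max_backlight) / 255; /* PWM duty to program */
	uint32_t cblv = (bclp * 0x64) / 0xff;          /* percentage reported back */

	printf("duty=0x%x (%u%%)\n", level, cblv);
	return 0;
}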
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 1,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ intel_enable_asle(dev);
spin_unlock_irqrestore(&dev_priv->user_irq_lock,
irqflags);
}
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
- DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+ DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
- DRM_DEBUG("ACPI OpRegion not supported!\n");
+ DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
opregion->header = base;
if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG("opregion signature mismatch\n");
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG("Public ACPI methods supported\n");
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_didl_outputs(dev);
} else {
- DRM_DEBUG("Public ACPI methods not supported\n");
+ DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
err = -ENOTSUPP;
goto err_out;
}
opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG("SWSCI supported\n");
+ DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG("ASLE supported\n");
+ DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion_enable_asle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf6879..974b3cf7061 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -140,6 +140,7 @@
#define MI_NOOP MI_INSTR(0, 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -260,6 +267,8 @@
#define HWS_PGA 0x02080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
+#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTX_EN (1<<0)
#define IPEIR 0x02088
#define IPEHR 0x0208c
#define INSTDONE 0x02090
@@ -405,6 +414,13 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+#define GMBUS0 0x5100
+#define GMBUS1 0x5104
+#define GMBUS2 0x5108
+#define GMBUS3 0x510c
+#define GMBUS4 0x5110
+#define GMBUS5 0x5120
+
/*
* Clock control & power management
*/
@@ -435,7 +451,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -512,7 +528,7 @@
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
/* i830, required in DVO non-gang */
#define PLL_P2_DIVIDE_BY_4 (1 << 23)
#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -522,7 +538,7 @@
#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
#define PLL_REF_INPUT_MASK (3 << 13)
#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* IGDNG */
+/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
@@ -586,12 +602,12 @@
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP_N_DIV_MASK 0x003f0000
-#define FP_N_IGD_DIV_MASK 0x00ff0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
#define FP_M1_DIV_MASK 0x00003f00
#define FP_M1_DIV_SHIFT 8
#define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_IGD_DIV_MASK 0x000000ff
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -769,7 +785,8 @@
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
-
+#define RCX_SW_EXIT (1<<23)
+#define RSX_STATUS_MASK 0x00700000
#define PEG_BAND_GAP_DATA 0x14d68
/*
@@ -844,7 +861,6 @@
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_EOS_INT_EN (1 << 10)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -868,7 +884,6 @@
HDMID_HOTPLUG_INT_EN | \
SDVOB_HOTPLUG_INT_EN | \
SDVOC_HOTPLUG_INT_EN | \
- TV_HOTPLUG_INT_EN | \
CRT_HOTPLUG_INT_EN)
@@ -879,7 +894,6 @@
#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (1 << 27)
-#define CRT_EOS_INT_STATUS (1 << 12)
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -1620,7 +1634,7 @@
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
/** limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1808,7 +1822,7 @@
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define IGD_SELF_REFRESH_EN (1<<30)
+#define PINEVIEW_SELF_REFRESH_EN (1<<30)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -1824,16 +1838,16 @@
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
-#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
-#define IGD_FIFO_LINE_SIZE 64
-#define IGD_MAX_WM 0x1ff
-#define IGD_DFT_WM 0x3f
-#define IGD_DFT_HPLLOFF_WM 0
-#define IGD_GUARD_WM 10
-#define IGD_CURSOR_FIFO 64
-#define IGD_CURSOR_MAX_WM 0x3f
-#define IGD_CURSOR_DFT_WM 0
-#define IGD_CURSOR_GUARD_WM 5
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
/*
* The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
@@ -1918,7 +1933,7 @@
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define DSPAADDR 0x70184
#define DSPASTRIDE 0x70188
@@ -1971,7 +1986,7 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
-/* IGDNG */
+/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -2117,6 +2132,7 @@
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (0xf << 8)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2157,6 +2173,13 @@
#define PCH_GPIOE 0xc5020
#define PCH_GPIOF 0xc5024
+#define PCH_GMBUS0 0xc5100
+#define PCH_GMBUS1 0xc5104
+#define PCH_GMBUS2 0xc5108
+#define PCH_GMBUS3 0xc510c
+#define PCH_GMBUS4 0xc5110
+#define PCH_GMBUS5 0xc5120
+
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
@@ -2292,7 +2315,7 @@
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
#define FDI_DP_PORT_WIDTH_X4 (3<<19)
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
-/* IGDNG: hardwired to 1 */
+/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
/* both Tx and Rx */
#define FDI_SCRAMBLING_ENABLE (0<<7)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec8171a44..d5ebb00a9d4 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
-#include "i915_drv.h"
+#include "intel_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
}
/* VGA state */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
+ if (I915_HAS_RC6(dev)) {
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+ dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
+ }
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
-
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -796,8 +794,10 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
+ if (I915_HAS_RC6(dev)) {
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+ I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
+ }
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
- I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+ intel_init_clock_gating(dev);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ /* I2C state */
+ intel_i2c_reset_gmbus(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256e60e..f2756774758 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int lfp_data_size, dvo_timing_offset;
+ int i, temp_downclock;
+ struct drm_display_mode *temp_mode;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
+ temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
+ temp_downclock = panel_fixed_mode->clock;
+ /*
+ * Enumerate the LVDS panel timing entries in the VBT to check
+ * whether a lower-clocked (downclock) mode is provided.
+ */
+ for (i = 0; i < 16; i++) {
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
+ dvo_timing = (struct lvds_dvo_timing *)
+ ((unsigned char *)entry + dvo_timing_offset);
+
+ fill_detail_timing_data(temp_mode, dvo_timing);
+
+ if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
+ temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
+ temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
+ temp_mode->htotal == panel_fixed_mode->htotal &&
+ temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
+ temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
+ temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
+ temp_mode->vtotal == panel_fixed_mode->vtotal &&
+ temp_mode->clock < temp_downclock) {
+ /*
+ * We already found a downclock, but keep
+ * looking for an even lower one.
+ */
+ temp_downclock = temp_mode->clock;
+ }
+ /* wipe temp_mode so stale fields don't affect the next entry */
+ memset(temp_mode, 0, sizeof(*temp_mode));
+ }
+ kfree(temp_mode);
+ if (temp_downclock < panel_fixed_mode->clock) {
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ temp_downclock, panel_fixed_mode->clock);
+ }
return;
}
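Editor's note: the VBT scan added above boils down to — among entries whose timings exactly match the chosen panel mode, remember the lowest dot clock. A compact sketch of that selection over a hypothetical entry table (only the display/total fields are compared here for brevity; the patch also matches the sync timings):

#include <stdio.h>

struct vbt_mode {
	int hdisplay, htotal, vdisplay, vtotal;
	int clock; /* kHz */
};

int main(void)
{
	struct vbt_mode fixed = { 1280, 1408, 800, 816, 68900 };
	struct vbt_mode entries[] = {
		{ 1280, 1408, 800, 816, 68900 }, /* the normal mode itself */
		{ 1280, 1408, 800, 816, 54000 }, /* same timings, lower clock */
		{ 1024, 1184, 768, 777, 65000 }, /* different panel: ignored */
	};
	int downclock = fixed.clock;

	for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		struct vbt_mode *m = &entries[i];
		if (m->hdisplay == fixed.hdisplay && m->htotal == fixed.htotal &&
		    m->vdisplay == fixed.vdisplay && m->vtotal == fixed.vtotal &&
		    m->clock < downclock)
			downclock = m->clock; /* keep the lowest matching clock */
	}
	if (downclock < fixed.clock)
		printf("downclock available: %d kHz (normal %d kHz)\n",
		       downclock, fixed.clock);
	return 0;
}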
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IGDNG(dev_priv->dev))
+ else if (IS_IRONLAKE(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
GPIOF,
};
- /* Set sensible defaults in case we can't find the general block
- or it is the wrong chipset */
- dev_priv->crt_ddc_bus = -1;
-
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
u16 block_size = get_blocksize(general);
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
- DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if ((bus_pin >= 1) && (bus_pin <= 6)) {
dev_priv->crt_ddc_bus =
crt_bus_map_table[bus_pin-1];
}
} else {
- DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
}
}
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
*/
if (p_defs->child_dev_size != sizeof(*p_child)) {
/* different child dev size . Ignore it */
- DRM_DEBUG("different child size is found. Invalid.\n");
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
return;
}
/* get the block size of general definitions */
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
p_child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG("Incorrect SDVO port. Skip it \n");
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
continue;
}
- DRM_DEBUG("the SDVO device with slave addr %2x is found on "
- "%s port\n",
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
p_child->slave_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->initialized = 1;
} else {
- DRM_DEBUG("Maybe one SDVO port is shared by "
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
if (p_child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG("there exists the slave2_addr. Maybe this "
- "is a SDVO device with multiple inputs.\n");
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
}
count++;
}
if (!count) {
/* No SDVO device info is found */
- DRM_DEBUG("No SDVO device info is found in VBT\n");
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
}
return;
}
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv,
dev_priv->render_reclock_avail = true;
}
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found\n");
+ return;
+ }
+ /* Check whether the child device size declared in the general
+ * definition block matches sizeof(struct child_device_config);
+ * if it differs, skip parsing the child device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* child device size mismatch; ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* compute the number of child device entries */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* count the child devices that are present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+ return;
+ }
+ dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev)
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
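
parse_device_mapping() above follows a two-pass pattern: count the entries
with a nonzero device_type, allocate once, then copy them. A standalone
sketch of the same pattern; the struct and its payload size are stand-ins
for child_device_config, not the real layout.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct child { unsigned short device_type; unsigned char payload[31]; };

    /* Copy the present (nonzero device_type) entries into one allocation. */
    static struct child *copy_present(const struct child *src, int n, int *out_n)
    {
        struct child *dst;
        int i, count = 0;

        for (i = 0; i < n; i++)
            if (src[i].device_type)
                count++;
        if (!count)
            return NULL;

        dst = calloc(count, sizeof(*dst));
        if (!dst)
            return NULL;

        for (i = 0, count = 0; i < n; i++)
            if (src[i].device_type)
                memcpy(&dst[count++], &src[i], sizeof(*dst));

        *out_n = count;
        return dst;
    }

    int main(void)
    {
        struct child src[3] = { { 0x1022, { 0 } }, { 0, { 0 } },
                                { 0x68C6, { 0 } } };
        int n = 0;
        struct child *devs = copy_present(src, 3, &n);

        printf("%d child device(s) copied\n", n);
        free(devs);
        return 0;
    }
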
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f69ac7..425ac9d7f72 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev);
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
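
The DEVICE_TYPE_* codes above let callers ask what outputs the VBT declares.
A hedged sketch of such a query; only the 0x78C6 value comes from the header,
while the struct and helper are hypothetical stand-ins for the driver's own.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEVICE_TYPE_eDP 0x78C6

    struct child { unsigned short device_type; };

    /* Does the parsed child device list declare an eDP panel? */
    static bool vbt_has_edp(const struct child *devs, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            if (devs[i].device_type == DEVICE_TYPE_eDP)
                return true;
        return false;
    }

    int main(void)
    {
        struct child devs[] = { { 0x1022 }, { 0x78C6 } };
        printf("eDP declared: %d\n", vbt_has_edp(devs, 2));
        return 0;
    }
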
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e5051446c48..9f3d3e56341 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = PCH_ADPA;
else
reg = ADPA;
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
I915_WRITE(reg, temp);
-
- if (IS_IGD(dev)) {
- if (mode == DRM_MODE_DPMS_OFF) {
- /* turn off DAC */
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp &= ~CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
-
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- } else {
- /* turn on DAC. EOS interrupt must be enabled after DAC
- * is enabled, so it sounds not good to enable it in
- * i915_driver_irq_postinstall()
- * wait 12.5ms after DAC is enabled
- */
- msleep(13);
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp |= CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
- }
- }
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
else
dpll_md_reg = DPLL_B_MD;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, 0);
}
I915_WRITE(adpa_reg, adpa);
}
-static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
ADPA_CRT_HOTPLUG_ENABLE |
ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
- DRM_DEBUG("pch crt adpa 0x%x", adpa);
+ DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
u32 hotplug_en;
int i, tries = 0;
- if (IS_IGDNG(dev))
- return intel_igdng_crt_detect_hotplug(connector);
+ if (IS_IRONLAKE(dev))
+ return intel_ironlake_crt_detect_hotplug(connector);
/*
* On 4 series desktop, CRT detect sequence need to be done twice
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev)
&intel_output->enc);
/* Set up the DDC bus. */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
i2c_reg = PCH_GPIOA;
else {
i2c_reg = GPIOA;
/* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != -1)
+ if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}
intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
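
With the explicit -1 default dropped above, dev_priv comes from a zeroed
allocation, so crt_ddc_bus == 0 now means "no VBT override"; a GPIO register
offset of 0 is never a valid DDC bus here. A minimal sketch of the selection
(the register values are placeholders, not the real GPIOA/PCH_GPIOA offsets):

    #include <stdio.h>

    #define GPIOA     0x5010    /* placeholder offset */
    #define PCH_GPIOA 0xc5010   /* placeholder offset */

    /* Pick the CRT DDC register: Ironlake uses the PCH pin; otherwise a
     * nonzero VBT-provided bus wins and zero falls back to GPIOA. */
    static unsigned int pick_crt_ddc_reg(int is_ironlake, unsigned int vbt_bus)
    {
        if (is_ironlake)
            return PCH_GPIOA;
        return vbt_bus ? vbt_bus : GPIOA;
    }

    int main(void)
    {
        printf("%#x\n", pick_crt_ddc_reg(0, 0));        /* GPIOA fallback */
        printf("%#x\n", pick_crt_ddc_reg(0, 0x5024));   /* VBT override */
        return 0;
    }
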
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420de57..52cd9b006da 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -32,7 +32,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
@@ -102,32 +102,32 @@ struct intel_limit {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
-#define IGD_VCO_MIN 1700000
-#define IGD_VCO_MAX 3500000
+#define PINEVIEW_VCO_MIN 1700000
+#define PINEVIEW_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
-/* IGD's Ncounter is a ring counter */
-#define IGD_N_MIN 3
-#define IGD_N_MAX 6
+/* Pineview's Ncounter is a ring counter */
+#define PINEVIEW_N_MIN 3
+#define PINEVIEW_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
-#define IGD_M_MIN 2
-#define IGD_M_MAX 256
+#define PINEVIEW_M_MIN 2
+#define PINEVIEW_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
-/* IGD M1 is reserved, and must be 0 */
-#define IGD_M1_MIN 0
-#define IGD_M1_MAX 0
-#define IGD_M2_MIN 0
-#define IGD_M2_MAX 254
+/* Pineview M1 is reserved, and must be 0 */
+#define PINEVIEW_M1_MIN 0
+#define PINEVIEW_M1_MAX 0
+#define PINEVIEW_M2_MIN 0
+#define PINEVIEW_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
-#define IGD_P_LVDS_MIN 7
-#define IGD_P_LVDS_MAX 112
+#define PINEVIEW_P_LVDS_MIN 7
+#define PINEVIEW_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -234,33 +234,33 @@ struct intel_limit {
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0
-/* IGDNG */
+/* Ironlake */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
*/
-#define IGDNG_DOT_MIN 25000
-#define IGDNG_DOT_MAX 350000
-#define IGDNG_VCO_MIN 1760000
-#define IGDNG_VCO_MAX 3510000
-#define IGDNG_N_MIN 1
-#define IGDNG_N_MAX 5
-#define IGDNG_M_MIN 79
-#define IGDNG_M_MAX 118
-#define IGDNG_M1_MIN 12
-#define IGDNG_M1_MAX 23
-#define IGDNG_M2_MIN 5
-#define IGDNG_M2_MAX 9
-#define IGDNG_P_SDVO_DAC_MIN 5
-#define IGDNG_P_SDVO_DAC_MAX 80
-#define IGDNG_P_LVDS_MIN 28
-#define IGDNG_P_LVDS_MAX 112
-#define IGDNG_P1_MIN 1
-#define IGDNG_P1_MAX 8
-#define IGDNG_P2_SDVO_DAC_SLOW 10
-#define IGDNG_P2_SDVO_DAC_FAST 5
-#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
-#define IGDNG_P2_LVDS_FAST 7 /* double channel */
-#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */
+#define IRONLAKE_DOT_MIN 25000
+#define IRONLAKE_DOT_MAX 350000
+#define IRONLAKE_VCO_MIN 1760000
+#define IRONLAKE_VCO_MAX 3510000
+#define IRONLAKE_N_MIN 1
+#define IRONLAKE_N_MAX 5
+#define IRONLAKE_M_MIN 79
+#define IRONLAKE_M_MAX 118
+#define IRONLAKE_M1_MIN 12
+#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M2_MIN 5
+#define IRONLAKE_M2_MAX 9
+#define IRONLAKE_P_SDVO_DAC_MIN 5
+#define IRONLAKE_P_SDVO_DAC_MAX 80
+#define IRONLAKE_P_LVDS_MIN 28
+#define IRONLAKE_P_LVDS_MAX 112
+#define IRONLAKE_P1_MIN 1
+#define IRONLAKE_P1_MAX 8
+#define IRONLAKE_P2_SDVO_DAC_SLOW 10
+#define IRONLAKE_P2_SDVO_DAC_FAST 5
+#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
+#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
+#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225 MHz */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -272,15 +272,15 @@ static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -453,13 +453,13 @@ static const intel_limit_t intel_limits_g4x_display_port = {
.find_pll = intel_find_pll_g4x_dp,
};
-static const intel_limit_t intel_limits_igd_sdvo = {
+static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
- .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
- .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
- .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
- .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
- .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
+ .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
+ .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
+ .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
+ .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
@@ -468,59 +468,59 @@ static const intel_limit_t intel_limits_igd_sdvo = {
.find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_igd_lvds = {
+static const intel_limit_t intel_limits_pineview_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
- .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
- .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
- .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
- .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
- .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
- .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
+ .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
+ .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
+ .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
+ .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
+ .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
+ .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- /* IGD only supports single-channel mode. */
+ /* Pineview only supports single-channel mode. */
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
.find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_igdng_sdvo = {
- .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
- .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
- .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
- .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
- .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
- .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
- .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
- .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
- .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
- .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
- .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
- .find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_sdvo = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
+ .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
+ .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+ .find_pll = intel_ironlake_find_best_PLL,
};
-static const intel_limit_t intel_limits_igdng_lvds = {
- .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
- .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
- .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
- .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
- .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
- .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
- .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
- .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
- .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
- .p2_slow = IGDNG_P2_LVDS_SLOW,
- .p2_fast = IGDNG_P2_LVDS_FAST },
- .find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
+ .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_P2_LVDS_SLOW,
+ .p2_fast = IRONLAKE_P2_LVDS_FAST },
+ .find_pll = intel_ironlake_find_best_PLL,
};
-static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_igdng_lvds;
+ limit = &intel_limits_ironlake_lvds;
else
- limit = &intel_limits_igdng_sdvo;
+ limit = &intel_limits_ironlake_sdvo;
return limit;
}
@@ -557,20 +557,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IGDNG(dev))
- limit = intel_igdng_limit(crtc);
+ if (IS_IRONLAKE(dev))
+ limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
+ } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
- } else if (IS_IGD(dev)) {
+ } else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_igd_lvds;
+ limit = &intel_limits_pineview_lvds;
else
- limit = &intel_limits_igd_sdvo;
+ limit = &intel_limits_pineview_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
@@ -580,8 +580,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
return limit;
}
-/* m1 is reserved as 0 in IGD, n is a ring counter */
-static void igd_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -591,8 +591,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
- if (IS_IGD(dev)) {
- igd_clock(refclk, clock);
+ if (IS_PINEVIEW(dev)) {
+ pineview_clock(refclk, clock);
return;
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
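
As a worked check of the PLL arithmetic, here is a standalone sketch. Only
the m and p derivations appear in the hunks above; the vco and dot steps
(vco = refclk * m / (n + 2), dot = vco / p) are assumptions following the
i9xx "register value + 2" convention, with the ring-counter n left as-is
for Pineview.

    #include <stdio.h>

    struct clk { int n, m1, m2, p1, p2, m, p, vco, dot; };

    static void i9xx_clock(int refclk, struct clk *c)
    {
        c->m = 5 * (c->m1 + 2) + (c->m2 + 2);   /* from the code above */
        c->p = c->p1 * c->p2;
        c->vco = refclk * c->m / (c->n + 2);    /* assumed */
        c->dot = c->vco / c->p;                 /* assumed */
    }

    static void pineview_clock(int refclk, struct clk *c)
    {
        c->m = c->m2 + 2;                       /* m1 is reserved as 0 */
        c->p = c->p1 * c->p2;
        c->vco = refclk * c->m / c->n;          /* assumed: no +2 on n */
        c->dot = c->vco / c->p;                 /* assumed */
    }

    int main(void)
    {
        struct clk c = { .n = 2, .m1 = 14, .m2 = 8, .p1 = 2, .p2 = 5 };

        i9xx_clock(96000, &c);  /* 96 MHz refclk, everything in kHz */
        printf("i9xx: m=%d vco=%d kHz dot=%d kHz\n", c.m, c.vco, c.dot);
        pineview_clock(96000, &c);
        printf("pineview: dot=%d kHz\n", c.dot);
        return 0;
    }
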
@@ -657,7 +657,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
- if (clock->m1 <= clock->m2 && !IS_IGD(dev))
+ if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
@@ -706,16 +706,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset (best_clock, 0, sizeof (*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
intel_clock(dev, refclk, &clock);
@@ -751,8 +752,8 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
break;
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
@@ -833,8 +834,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
@@ -857,8 +858,8 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -871,7 +872,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_igdng_dp(limit, crtc, target,
+ return intel_find_pll_ironlake_dp(limit, crtc, target,
refclk, best_clock);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -949,7 +950,7 @@ void
intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
+ msleep(20);
}
/* Parameters have changed, update FBC info */
@@ -994,7 +995,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
@@ -1017,7 +1018,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
intel_wait_for_vblank(dev);
- DRM_DEBUG("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
}
static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1062,7 +1063,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
- DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1077,7 @@ void g4x_disable_fbc(struct drm_device *dev)
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
intel_wait_for_vblank(dev);
- DRM_DEBUG("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
}
static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1141,25 +1142,27 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
if (intel_fb->obj->size > dev_priv->cfb_size) {
- DRM_DEBUG("framebuffer too large, disabling compression\n");
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
goto out_disable;
}
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("mode incompatible with compression, disabling\n");
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
goto out_disable;
}
if ((mode->hdisplay > 2048) ||
(mode->vdisplay > 1536)) {
- DRM_DEBUG("mode too large for compression, disabling\n");
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
- DRM_DEBUG("plane not 0, disabling compression\n");
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
goto out_disable;
}
if (obj_priv->tiling_mode != I915_TILING_X) {
- DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+ DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
goto out_disable;
}
@@ -1181,13 +1184,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
return;
out_disable:
- DRM_DEBUG("unsupported config, disabling FBC\n");
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
if (dev_priv->display.fbc_enabled(crtc))
dev_priv->display.disable_fbc(dev);
}
static int
+intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ u32 alignment;
+ int ret;
+
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_NONE:
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ break;
+ case I915_TILING_Y:
+ /* FIXME: Is this true? */
+ DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ return -EINVAL;
+ default:
+ BUG();
+ }
+
+ ret = i915_gem_object_pin(obj, alignment);
+ if (ret != 0)
+ return ret;
+
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+ * a fence as the cost is not that onerous.
+ */
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -1206,12 +1253,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- u32 dspcntr, alignment;
+ u32 dspcntr;
int ret;
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -1228,24 +1275,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
obj = intel_fb->obj;
obj_priv = obj->driver_private;
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- alignment = 64 * 1024;
- break;
- case I915_TILING_X:
- /* pin() will align the object as required by fence */
- alignment = 0;
- break;
- case I915_TILING_Y:
- /* FIXME: Is this true? */
- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
- return -EINVAL;
- default:
- BUG();
- }
-
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, alignment);
+ ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1258,20 +1289,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
- * whereas 965+ only requires a fence if using framebuffer compression.
- * For simplicity, we always install a fence as the cost is not that onerous.
- */
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
-
dspcntr = I915_READ(dspcntr_reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1304,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
break;
case 24:
case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ if (crtc->fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1311,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start = obj_priv->gtt_offset;
Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
@@ -1363,7 +1383,7 @@ static void i915_disable_vga (struct drm_device *dev)
u8 sr1;
u32 vga_reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
@@ -1379,19 +1399,19 @@ static void i915_disable_vga (struct drm_device *dev)
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
}
-static void igdng_disable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
}
-static void igdng_enable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1424,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
}
-static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG("eDP PLL enable for clock %d\n", clock);
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
@@ -1440,7 +1460,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
-static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1481,10 +1501,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG("crtc %d dpms on\n", pipe);
+ DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0) {
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ POSTING_READ(PCH_LVDS);
+ }
+ }
+
if (HAS_eDP) {
/* enable eDP PLL */
- igdng_enable_pll_edp(crtc);
+ ironlake_enable_pll_edp(crtc);
} else {
/* enable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
@@ -1501,7 +1530,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_rx_reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for IGDNG */
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
temp = I915_READ(fdi_tx_reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1597,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK) == 0) {
for (j = 0; j < tries; j++) {
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+ temp);
if (temp & FDI_RX_BIT_LOCK)
break;
udelay(200);
@@ -1582,11 +1612,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_BIT_LOCK);
else
- DRM_DEBUG("train 1 fail\n");
+ DRM_DEBUG_KMS("train 1 fail\n");
} else {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG("train 1 ok 2!\n");
+ DRM_DEBUG_KMS("train 1 ok 2!\n");
}
temp = I915_READ(fdi_tx_reg);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1631,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
for (j = 0; j < tries; j++) {
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+ temp);
if (temp & FDI_RX_SYMBOL_LOCK)
break;
udelay(200);
@@ -1614,15 +1645,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
if (j != tries) {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG("train 2 ok 1!\n");
+ DRM_DEBUG_KMS("train 2 ok 1!\n");
} else
- DRM_DEBUG("train 2 fail\n");
+ DRM_DEBUG_KMS("train 2 fail\n");
} else {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG("train 2 ok 2!\n");
+ DRM_DEBUG_KMS("train 2 ok 2!\n");
}
- DRM_DEBUG("train done\n");
+ DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1664,9 +1695,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG("crtc %d dpms off\n", pipe);
-
- i915_disable_vga(dev);
+ DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
@@ -1677,6 +1706,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
+ i915_disable_vga(dev);
+
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1721,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(500);
continue;
} else {
- DRM_DEBUG("pipe %d off delay\n", pipe);
+ DRM_DEBUG_KMS("pipe %d off delay\n",
+ pipe);
break;
}
}
} else
- DRM_DEBUG("crtc %d is disabled\n", pipe);
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
- if (HAS_eDP) {
- igdng_disable_pll_edp(crtc);
+ udelay(100);
+
+ /* Disable PF */
+ temp = I915_READ(pf_ctl_reg);
+ if ((temp & PF_ENABLE) != 0) {
+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ I915_READ(pf_ctl_reg);
}
+ I915_WRITE(pf_win_size, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1725,6 +1763,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(100);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
+ I915_READ(PCH_LVDS);
+ udelay(100);
+ }
+
/* disable PCH transcoder */
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,12 +1783,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(500);
continue;
} else {
- DRM_DEBUG("transcoder %d off delay\n", pipe);
+ DRM_DEBUG_KMS("transcoder %d off "
+ "delay\n", pipe);
break;
}
}
}
+ udelay(100);
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -1751,14 +1799,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_rx_reg);
- if ((temp & FDI_RX_PLL_ENABLE) != 0) {
- temp &= ~FDI_SEL_PCDCLK;
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ if (HAS_eDP) {
+ ironlake_disable_pll_edp(crtc);
}
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_SEL_PCDCLK;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+
/* Disable CPU FDI TX PLL */
temp = I915_READ(fdi_tx_reg);
if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1821,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(100);
}
- /* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
-
/* Wait for the clocks to turn off. */
- udelay(150);
+ udelay(100);
break;
}
}
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ struct intel_overlay *overlay;
+ int ret;
+
+ if (!enable && intel_crtc->overlay) {
+ overlay = intel_crtc->overlay;
+ mutex_lock(&overlay->dev->struct_mutex);
+ for (;;) {
+ ret = intel_overlay_switch_off(overlay);
+ if (ret == 0)
+ break;
+
+ ret = intel_overlay_recover_from_interrupt(overlay, 0);
+ if (ret != 0) {
+ /* The overlay no longer responds; this usually
+ * means a black screen and an unkillable X
+ * server. Record the wedge before BUG(), which
+ * does not return when CONFIG_BUG is set. */
+ overlay->hw_wedged = HW_WEDGED;
+ BUG();
+ break;
+ }
+ }
+ mutex_unlock(&overlay->dev->struct_mutex);
+ }
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway. */
+
+ return;
+}
+
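
intel_crtc_dpms_overlay() above loops: try to switch the overlay off, and
on failure run interrupt recovery before retrying; only if recovery itself
fails is the hardware treated as wedged. A sketch of that retry shape with
stub functions standing in for the kernel calls.

    #include <stdio.h>

    static int attempts;
    /* Stub: fail twice, then succeed, to exercise the retry. */
    static int switch_off(void) { return attempts++ < 2 ? -1 : 0; }
    static int recover(void)    { return 0; }

    int main(void)
    {
        int wedged = 0;

        for (;;) {
            if (switch_off() == 0)
                break;
            if (recover() != 0) {
                wedged = 1;     /* the kernel code BUG()s here */
                break;
            }
        }
        printf("off after %d attempt(s), wedged=%d\n", attempts, wedged);
        return 0;
    }
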
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1839,12 +1916,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_update_fbc(crtc, &crtc->mode);
/* Give the overlay scaler a chance to enable if it's on this pipe */
- //intel_crtc_dpms_video(crtc, true); TODO
+ intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
intel_update_watermarks(dev);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
- //intel_crtc_dpms_video(crtc, FALSE); TODO
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ drm_vblank_off(dev, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
@@ -1963,7 +2042,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
@@ -2039,7 +2118,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe (struct drm_device *dev)
+int intel_panel_fitter_pipe (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
@@ -2083,9 +2162,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
#define LINK_N 0x80000
static void
-igdng_compute_m_n(int bits_per_pixel, int nlanes,
- int pixel_clock, int link_clock,
- struct fdi_m_n *m_n)
+ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
+ int link_clock, struct fdi_m_n *m_n)
{
u64 temp;
@@ -2113,34 +2191,34 @@ struct intel_watermark_params {
unsigned long cacheline_size;
};
-/* IGD has different values for various configs */
-static struct intel_watermark_params igd_display_wm = {
- IGD_DISPLAY_FIFO,
- IGD_MAX_WM,
- IGD_DFT_WM,
- IGD_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+/* Pineview has different values for various configs */
+static struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params igd_display_hplloff_wm = {
- IGD_DISPLAY_FIFO,
- IGD_MAX_WM,
- IGD_DFT_HPLLOFF_WM,
- IGD_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params igd_cursor_wm = {
- IGD_CURSOR_FIFO,
- IGD_CURSOR_MAX_WM,
- IGD_CURSOR_DFT_WM,
- IGD_CURSOR_GUARD_WM,
- IGD_FIFO_LINE_SIZE,
+static struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params igd_cursor_hplloff_wm = {
- IGD_CURSOR_FIFO,
- IGD_CURSOR_MAX_WM,
- IGD_CURSOR_DFT_WM,
- IGD_CURSOR_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
static struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
@@ -2213,11 +2291,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1000;
entries_required /= wm->cacheline_size;
- DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
wm_size = wm->fifo_size - (entries_required + wm->guard_size);
- DRM_DEBUG("FIFO watermark level: %d\n", wm_size);
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
@@ -2279,50 +2357,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
return latency;
}
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
return NULL;
}
-static void igd_disable_cxsr(struct drm_device *dev)
+static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
/* deactivate cxsr */
reg = I915_READ(DSPFW3);
- reg &= ~(IGD_SELF_REFRESH_EN);
+ reg &= ~(PINEVIEW_SELF_REFRESH_EN);
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is disabled\n");
}
-static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
+static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
unsigned long wm;
struct cxsr_latency *latency;
- latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
dev_priv->mem_freq);
if (!latency) {
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
- igd_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
return;
}
/* Display SR */
- wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= 0x7fffff;
reg |= wm << 23;
I915_WRITE(DSPFW1, reg);
- DRM_DEBUG("DSPFW1 register is %x\n", reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~(0x3f << 24);
@@ -2330,7 +2408,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
reg = I915_READ(DSPFW3);
reg &= 0xfffffe00;
@@ -2338,17 +2416,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~(0x3f << 16);
reg |= (wm & 0x3f) << 16;
I915_WRITE(DSPFW3, reg);
- DRM_DEBUG("DSPFW3 register is %x\n", reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
reg = I915_READ(DSPFW3);
- reg |= IGD_SELF_REFRESH_EN;
+ reg |= PINEVIEW_SELF_REFRESH_EN;
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is enabled\n");
@@ -2384,8 +2462,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
(dsparb & 0x7f);
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2403,8 +2481,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
(dsparb & 0x1ff);
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2418,7 +2496,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
size);
return size;
@@ -2433,8 +2512,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2509,15 +2588,39 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
- int unused3, int unused4)
+static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long line_time_us;
+ int sr_clock, sr_entries, srwm = 1;
+
+ /* Calculate SR entries for single-plane configs */
+ if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1) *
+ pixel_size * sr_hdisplay) / 1000;
+ sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
+ srwm = I945_FIFO_SIZE - sr_entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x3f;
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ }
- DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
+ (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
}
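
A standalone check of the self-refresh watermark math added to
i965_update_wm(): line time in microseconds from hdisplay and the pixel
clock, FIFO entries from the latency, then srwm = FIFO size - entries.
The 12000 ns latency matches the patch; FIFO size 127 and line size 64
are assumptions standing in for I945_FIFO_SIZE and I915_FIFO_LINE_SIZE.

    #include <stdio.h>

    #define SR_LATENCY_NS  12000
    #define FIFO_SIZE      127  /* assumed I945_FIFO_SIZE */
    #define FIFO_LINE_SIZE 64   /* assumed I915_FIFO_LINE_SIZE */

    int main(void)
    {
        int sr_clock = 148500;          /* pixel clock, kHz */
        int sr_hdisplay = 1920;
        int pixel_size = 4;             /* bytes per pixel */
        int line_time_us, sr_entries, srwm;

        line_time_us = (sr_hdisplay * 1000) / sr_clock;
        sr_entries = ((SR_LATENCY_NS / line_time_us + 1) *
                      pixel_size * sr_hdisplay) / 1000;
        /* round up to whole FIFO lines */
        sr_entries = (sr_entries + FIFO_LINE_SIZE - 1) / FIFO_LINE_SIZE;
        srwm = FIFO_SIZE - sr_entries;
        if (srwm < 0)
            srwm = 1;
        printf("SR watermark: %d\n", srwm & 0x3f);
        return 0;
    }
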
@@ -2553,7 +2656,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
pixel_size, latency_ns);
planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
pixel_size, latency_ns);
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
* Overlay gets an aggressive default since video jitter is bad.
@@ -2573,14 +2676,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = (((sr_latency_ns / line_time_us) + 1) *
pixel_size * sr_hdisplay) / 1000;
sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
}
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2710,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
pixel_size, latency_ns);
fwater_lo |= (3<<8) | planea_wm;
- DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm);
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
@@ -2661,11 +2764,11 @@ static void intel_update_watermarks(struct drm_device *dev)
if (crtc->enabled) {
enabled++;
if (intel_crtc->plane == 0) {
- DRM_DEBUG("plane A (pipe %d) clock: %d\n",
+ DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
intel_crtc->pipe, crtc->mode.clock);
planea_clock = crtc->mode.clock;
} else {
- DRM_DEBUG("plane B (pipe %d) clock: %d\n",
+ DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
intel_crtc->pipe, crtc->mode.clock);
planeb_clock = crtc->mode.clock;
}
@@ -2682,10 +2785,10 @@ static void intel_update_watermarks(struct drm_device *dev)
return;
/* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_IGD(dev))
- igd_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_IGD(dev))
- igd_disable_cxsr(dev);
+ if (enabled == 1 && IS_PINEVIEW(dev))
+ pineview_enable_cxsr(dev, sr_clock, pixel_size);
+ else if (IS_PINEVIEW(dev))
+ pineview_disable_cxsr(dev);
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
@@ -2779,10 +2882,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;
@@ -2802,14 +2906,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+ if (is_lvds && limit->find_reduced_pll &&
+ dev_priv->lvds_downclock_avail) {
memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
has_reduced_clock = limit->find_reduced_pll(limit, crtc,
- (adjusted_mode->clock*3/4),
+ dev_priv->lvds_downclock,
refclk,
&reduced_clock);
+ if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ /*
+ * If a different P value is found, the display clock
+ * cannot be switched using FP0/FP1 alone, so disable
+ * the LVDS downclock feature in that case.
+ */
+ DRM_DEBUG_KMS("Different P is found for "
+ "LVDS clock/downclock\n");
+ has_reduced_clock = 0;
+ }
}
-
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
@@ -2831,7 +2946,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* FDI link */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -2873,8 +2988,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
- igdng_compute_m_n(bpp, lane, target_clock,
- link_bw, &m_n);
+ ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
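
Conceptually, the data M/N pair that ironlake_compute_m_n() produces is
the ratio of pixel payload bandwidth (pixel_clock * bpp) to total FDI link
bandwidth (link_clock * 8 * nlanes), reduced the way fdi_reduce_ratio()
reduces by gcd. The sketch below shows only that ratio; the kernel's exact
DATA_N/LINK_N register scaling is omitted and the sample values are
illustrative.

    #include <stdio.h>

    static unsigned long gcd_ul(unsigned long a, unsigned long b)
    {
        while (b) {
            unsigned long t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    int main(void)
    {
        unsigned long pixel_clock = 148500; /* kHz */
        unsigned long bpp = 24;
        unsigned long link_clock = 270000;  /* kHz */
        unsigned long nlanes = 4;
        unsigned long m = pixel_clock * bpp;
        unsigned long n = link_clock * 8 * nlanes;
        unsigned long g = gcd_ul(m, n);

        printf("data M/N = %lu/%lu\n", m / g, n / g);
        return 0;
    }
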
/* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +2996,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3031,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (IS_IGD(dev)) {
+ if (IS_PINEVIEW(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3043,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -2942,19 +3056,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IGDNG(dev))
+ else if (IS_IRONLAKE(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_IGD(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3087,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3005,9 +3119,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- /* IGDNG's plane is forced to pipe, bit 24 is to
+ /* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -3034,20 +3148,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
- DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- /* assign to IGDNG registers */
- if (IS_IGDNG(dev)) {
+ /* assign to Ironlake registers */
+ if (IS_IRONLAKE(dev)) {
fp_reg = pch_fp_reg;
dpll_reg = pch_dpll_reg;
}
if (is_edp) {
- igdng_disable_pll_edp(crtc);
+ ironlake_disable_pll_edp(crtc);
} else if ((dpll & DPLL_VCO_ENABLE)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3176,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
@@ -3095,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
if (is_sdvo) {
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3229,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(fp_reg + 4, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG("enabling CxSR downclocking\n");
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(fp_reg + 4, fp);
intel_crtc->lowfreq_avail = false;
if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG("disabling CxSR downclocking\n");
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
@@ -3142,21 +3256,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(dsppos_reg, 0);
}
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
if (is_edp) {
- igdng_set_pll_edp(crtc, adjusted_mode->clock);
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3318,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- /* use legacy palette for IGDNG */
- if (IS_IGDNG(dev))
+ /* use legacy palette for Ironlake */
+ if (IS_IRONLAKE(dev))
palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
LGC_PALETTE_B;
@@ -3234,11 +3348,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
size_t addr;
int ret;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
- DRM_DEBUG("cursor off\n");
+ DRM_DEBUG_KMS("cursor off\n");
if (IS_MOBILE(dev) || IS_I9XX(dev)) {
temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
temp |= CURSOR_MODE_DISABLE;
@@ -3546,18 +3660,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_IGD(dev)) {
- clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
- clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ if (IS_PINEVIEW(dev)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
if (IS_I9XX(dev)) {
- if (IS_IGD(dev))
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3686,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
7 : 14;
break;
default:
- DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+ DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return 0;
}
@@ -3658,7 +3772,7 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("idle timer fired, downclocking\n");
+ DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
dev_priv->busy = false;
@@ -3669,11 +3783,11 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG("not reclocking render clock\n");
+ DRM_DEBUG_DRIVER("not reclocking render clock\n");
return;
}
@@ -3682,7 +3796,7 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
else if (IS_I85X(dev))
pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG("increasing render clock frequency\n");
+ DRM_DEBUG_DRIVER("increasing render clock frequency\n");
/* Schedule downclock */
if (schedule)
@@ -3694,11 +3808,11 @@ void intel_decrease_renderclock(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG("not reclocking render clock\n");
+ DRM_DEBUG_DRIVER("not reclocking render clock\n");
return;
}
@@ -3758,7 +3872,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
}
- DRM_DEBUG("decreasing render clock frequency\n");
+ DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
}
/* Note that no increase function is needed for this - increase_renderclock()
@@ -3766,7 +3880,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
*/
void intel_decrease_displayclock(struct drm_device *dev)
{
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
@@ -3792,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG("idle timer fired, downclocking\n");
+ DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
intel_crtc->busy = false;
@@ -3808,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->lvds_downclock_avail)
return;
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG("upclocking LVDS\n");
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG("failed to upclock LVDS!\n");
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
- DRM_DEBUG("downclocking LVDS\n");
+ DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
- DRM_DEBUG("failed to downclock LVDS!\n");
+ DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3936,8 +4050,13 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
+ if (!dev_priv->busy) {
+ dev_priv->busy = true;
+ intel_increase_renderclock(dev, true);
+ } else {
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -3967,6 +4086,158 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+};
+
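+/* Unpinning the old framebuffer needs dev->struct_mutex, which must not
+ * be taken from the vblank interrupt that completes a flip, so the unpin
+ * is deferred to this work item, scheduled from intel_finish_page_flip().
+ */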
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+ struct intel_unpin_work *work =
+ container_of(__work, struct intel_unpin_work, work);
+
+ mutex_lock(&work->dev->struct_mutex);
+ i915_gem_object_unpin(work->obj);
+ drm_gem_object_unreference(work->obj);
+ mutex_unlock(&work->dev->struct_mutex);
+ kfree(work);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ if (work == NULL || !work->pending) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ intel_crtc->unpin_work = NULL;
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ if (work->event) {
+ e = work->event;
+ do_gettimeofday(&now);
+ e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ obj_priv = work->obj->driver_private;
+ if (atomic_dec_and_test(&obj_priv->pending_flip))
+ DRM_WAKEUP(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work)
+ intel_crtc->unpin_work->pending = 1;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ int ret;
+ RING_LOCALS;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&dev->struct_mutex);
+
+ work->event = event;
+ work->dev = crtc->dev;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ work->obj = intel_fb->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj);
+	if (ret != 0) {
+		/* don't leave a dangling pointer to the freed work item */
+		spin_lock_irqsave(&dev->event_lock, flags);
+		intel_crtc->unpin_work = NULL;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+ /* Reference the old fb object for the scheduled work. */
+ drm_gem_object_reference(work->obj);
+
+ crtc->fb = fb;
+ i915_gem_object_flush_write_domain(obj);
+ drm_vblank_get(dev, intel_crtc->pipe);
+ obj_priv = obj->driver_private;
+ atomic_inc(&obj_priv->pending_flip);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ if (IS_I965G(dev)) {
+ OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING((fb->width << 16) | fb->height);
+ } else {
+ OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(MI_NOOP);
+ }
+ ADVANCE_LP_RING();
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
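The function above supplies the new .page_flip hook wired up just below. As a hedged illustration of how this path is driven from userspace through the generic DRM page-flip ioctl (the fd, crtc_id and fb_id values are assumptions, and error handling is elided):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

static int flip_and_wait(int drm_fd, uint32_t crtc_id, uint32_t fb_id)
{
	struct drm_mode_crtc_page_flip flip;
	char buf[1024];

	memset(&flip, 0, sizeof(flip));
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;	/* request a completion event */

	if (ioctl(drm_fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) < 0)
		return -1;

	/* The event is queued by intel_finish_page_flip() on the vblank
	 * after the ring executes MI_DISPLAY_FLIP, and read off the fd. */
	return read(drm_fd, buf, sizeof(buf)) > 0 ? 0 : -1;
}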
+
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
@@ -3983,11 +4254,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = intel_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
};
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
@@ -4010,10 +4283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
- DRM_DEBUG("swapping pipes & planes for FBC\n");
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = ((pipe == 0) ? 1 : 0);
}
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -4090,7 +4368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
int found;
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,7 +4396,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D);
- } else if (IS_I9XX(dev)) {
+ } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
@@ -4145,10 +4423,10 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D);
- } else
+ } else if (IS_I8XX(dev))
intel_dvo_init(dev);
- if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
+ if (SUPPORTS_TV(dev))
intel_tv_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -4257,7 +4535,7 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4291,11 +4569,52 @@ void intel_init_clock_gating(struct drm_device *dev)
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- } else if (IS_I855(dev) || IS_I865G(dev)) {
+ } else if (IS_I85X(dev) || IS_I865G(dev)) {
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
} else if (IS_I830(dev)) {
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
+
+	/*
+	 * The GPU can automatically power down the render unit if it is
+	 * given a page in which to save its state.
+	 */
+ if (I915_HAS_RC6(dev)) {
+ struct drm_gem_object *pwrctx;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev_priv->pwrctx) {
+ obj_priv = dev_priv->pwrctx->driver_private;
+ } else {
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, "
+ "RC6 disabled\n");
+ goto out;
+ }
+
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n",
+ ret);
+ drm_gem_object_unreference(pwrctx);
+ goto out;
+ }
+
+ i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
+ }
+
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
+
+out:
+ return;
}
/* Set up chip specific display functions */
@@ -4304,8 +4623,8 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_IGDNG(dev))
- dev_priv->display.dpms = igdng_crtc_dpms;
+ if (IS_IRONLAKE(dev))
+ dev_priv->display.dpms = ironlake_crtc_dpms;
else
dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4324,13 +4643,13 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_I945G(dev))
+	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_I915GM(dev))
@@ -4339,7 +4658,7 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I865G(dev))
dev_priv->display.get_display_clock_speed =
i865_get_display_clock_speed;
- else if (IS_I855(dev))
+ else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
i855_get_display_clock_speed;
else /* 852, 830 */
@@ -4347,7 +4666,7 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->display.update_wm = NULL;
else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
@@ -4403,7 +4722,7 @@ void intel_modeset_init(struct drm_device *dev)
num_pipe = 2;
else
num_pipe = 1;
- DRM_DEBUG("%d display pipe%s available.\n",
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
if (IS_I85X(dev))
@@ -4422,6 +4741,15 @@ void intel_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
+
+ intel_setup_overlay(dev);
+
+ if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq))
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4445,11 +4773,21 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
- mutex_unlock(&dev->struct_mutex);
-
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ if (dev_priv->pwrctx) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = dev_priv->pwrctx->driver_private;
+		I915_WRITE(PWRCTXA, obj_priv->gtt_offset & ~PWRCTX_EN);
+ I915_READ(PWRCTXA);
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(dev_priv->pwrctx);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
drm_mode_config_cleanup(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d83447557f9..4e7aa8b7b93 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -33,7 +33,8 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
+
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
*/
if (IS_eDP(intel_output))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (IS_IGDNG(dev))
- aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
+ else if (IS_IRONLAKE(dev))
+		aux_clock_divider = 62; /* Ironlake: input clock fixed at 125MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT;
}
@@ -382,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
}
static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_bytes)
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
struct intel_output *intel_output = dp_priv->intel_output;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
- return intel_dp_aux_ch(intel_output,
- send, send_bytes, recv, recv_bytes);
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
}
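The switch above packs each bit-level I2C operation into a native AUX transaction. A hedged sketch of just the read-request layout, pulled out as a standalone helper for clarity; the helper name and the 0x50 EDID address in the comment are illustrative assumptions:

static int build_i2c_read_request(uint8_t msg[4], uint16_t address, int mot)
{
	msg[0] = AUX_I2C_READ << 4;		/* command nibble in the high bits */
	if (mot)
		msg[0] |= AUX_I2C_MOT << 4;	/* middle-of-transaction: no STOP yet */
	msg[1] = address >> 8;			/* 7-bit I2C address, high byte is 0 */
	msg[2] = address;			/* e.g. 0x50 for an EDID read */
	msg[3] = 0;				/* request length - 1: one data byte */
	return 4;				/* msg_bytes handed to intel_dp_aux_ch() */
}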
static int
@@ -435,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
- DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
@@ -514,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
-static void igdng_edp_backlight_on (struct drm_device *dev)
+static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
}
-static void igdng_edp_backlight_off (struct drm_device *dev)
+static void ironlake_edp_backlight_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_output, dp_priv->DP);
if (IS_eDP(intel_output))
- igdng_edp_backlight_off(dev);
+ ironlake_edp_backlight_off(dev);
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
if (IS_eDP(intel_output))
- igdng_edp_backlight_on(dev);
+ ironlake_edp_backlight_on(dev);
}
}
dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (IS_eDP(intel_output)) {
DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
}
static enum drm_connector_status
-igdng_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector)
dp_priv->has_audio = false;
- if (IS_IGDNG(dev))
- return igdng_dp_detect(connector);
+ if (IS_IRONLAKE(dev))
+ return ironlake_dp_detect(connector);
temp = I915_READ(PORT_HOTPLUG_EN);
@@ -1227,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output)
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
-
+/*
+ * Enumerate the child device array parsed from the VBT to check whether
+ * the given DP output is present.
+ * If it is present, return 1; if it is not, return 0.
+ * If no child devices were parsed from the VBT, the given DP output is
+ * assumed to be present.
+ */
+static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, dp_port, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ dp_port = 0;
+ if (dp_reg == DP_B || dp_reg == PCH_DP_B)
+ dp_port = PORT_IDPB;
+ else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
+ dp_port = PORT_IDPC;
+ else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
+ dp_port = PORT_IDPD;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not DP, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_DP &&
+ p_child->device_type != DEVICE_TYPE_eDP)
+ continue;
+ /* Find the eDP port */
+ if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
+ ret = 1;
+ break;
+ }
+ /* Find the DP port */
+ if (p_child->dvo_port == dp_port) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
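The same enumerate-the-child-devices shape recurs for HDMI and LVDS later in this patch; a hedged sketch of the common pattern, with device_matches() a hypothetical predicate standing in for the per-output type and port checks:

static int output_is_present_in_vbt(struct drm_i915_private *dev_priv,
		int (*device_matches)(struct child_device_config *))
{
	int i;

	if (!dev_priv->child_dev_num)
		return 1;	/* no parsed VBT data: assume the output exists */

	for (i = 0; i < dev_priv->child_dev_num; i++)
		if (device_matches(dev_priv->child_dev + i))
			return 1;
	return 0;
}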
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1237,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
struct intel_dp_priv *dp_priv;
const char *name = NULL;
+ if (!dp_is_present_in_vbt(dev, output_reg)) {
+ DRM_DEBUG_KMS("DP is not present. Ignore it\n");
+ return;
+ }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -1254,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else
intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
- if (output_reg == DP_B)
+ if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C)
+ else if (output_reg == DP_C || output_reg == PCH_DP_C)
intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D)
+ else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_output)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
deleted file mode 100644
index 2b38054d3b6..00000000000
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright © 2008 Keith Packard
- *
- * Permission to use, copy, modify, distribute, and sell this software and its
- * documentation for any purpose is hereby granted without fee, provided that
- * the above copyright notice appear in all copies and that both that copyright
- * notice and this permission notice appear in supporting documentation, and
- * that the name of the copyright holders not be used in advertising or
- * publicity pertaining to distribution of the software without specific,
- * written prior permission. The copyright holders make no representations
- * about the suitability of this software for any purpose. It is provided "as
- * is" without express or implied warranty.
- *
- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
- * OF THIS SOFTWARE.
- */
-
-#ifndef _INTEL_DP_H_
-#define _INTEL_DP_H_
-
-/* From the VESA DisplayPort spec */
-
-#define AUX_NATIVE_WRITE 0x8
-#define AUX_NATIVE_READ 0x9
-#define AUX_I2C_WRITE 0x0
-#define AUX_I2C_READ 0x1
-#define AUX_I2C_STATUS 0x2
-#define AUX_I2C_MOT 0x4
-
-#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
-#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
-#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
-#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
-
-#define AUX_I2C_REPLY_ACK (0x0 << 6)
-#define AUX_I2C_REPLY_NACK (0x1 << 6)
-#define AUX_I2C_REPLY_DEFER (0x2 << 6)
-#define AUX_I2C_REPLY_MASK (0x3 << 6)
-
-/* AUX CH addresses */
-#define DP_LINK_BW_SET 0x100
-# define DP_LINK_BW_1_62 0x06
-# define DP_LINK_BW_2_7 0x0a
-
-#define DP_LANE_COUNT_SET 0x101
-# define DP_LANE_COUNT_MASK 0x0f
-# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
-
-#define DP_TRAINING_PATTERN_SET 0x102
-
-# define DP_TRAINING_PATTERN_DISABLE 0
-# define DP_TRAINING_PATTERN_1 1
-# define DP_TRAINING_PATTERN_2 2
-# define DP_TRAINING_PATTERN_MASK 0x3
-
-# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
-# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
-# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
-# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
-# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
-
-# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
-# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
-
-# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
-# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
-# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
-# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
-
-#define DP_TRAINING_LANE0_SET 0x103
-#define DP_TRAINING_LANE1_SET 0x104
-#define DP_TRAINING_LANE2_SET 0x105
-#define DP_TRAINING_LANE3_SET 0x106
-
-# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
-# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
-# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
-# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
-# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
-
-# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
-# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
-
-# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
-# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
-
-#define DP_DOWNSPREAD_CTRL 0x107
-# define DP_SPREAD_AMP_0_5 (1 << 4)
-
-#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
-# define DP_SET_ANSI_8B10B (1 << 0)
-
-#define DP_LANE0_1_STATUS 0x202
-#define DP_LANE2_3_STATUS 0x203
-
-# define DP_LANE_CR_DONE (1 << 0)
-# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
-# define DP_LANE_SYMBOL_LOCKED (1 << 2)
-
-#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
-
-#define DP_INTERLANE_ALIGN_DONE (1 << 0)
-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
-#define DP_LINK_STATUS_UPDATED (1 << 7)
-
-#define DP_SINK_STATUS 0x205
-
-#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
-#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
-
-#define DP_ADJUST_REQUEST_LANE0_1 0x206
-#define DP_ADJUST_REQUEST_LANE2_3 0x207
-
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
-
-struct i2c_algo_dp_aux_data {
- bool running;
- u16 address;
- int (*aux_ch) (struct i2c_adapter *adapter,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_bytes);
-};
-
-int
-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-
-#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9507e..a51573da1ff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,32 @@ struct intel_output {
int clone_mask;
};
+struct intel_crtc;
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+	u32 pfit_vscale_ratio; /* fixed-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ void *virt_addr;
+ /* flip handling */
+ uint32_t last_flip_req;
+ int hw_wedged;
+#define HW_WEDGED 1
+#define NEEDS_WAIT_FOR_FLIP 2
+#define RELEASE_OLD_VID 3
+#define SWITCH_OFF_STAGE_1 4
+#define SWITCH_OFF_STAGE_2 5
+};
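The hw_wedged values enumerate how far an interrupted overlay operation got, so the error path can finish the remaining steps. A hedged, illustrative map of the states (the helper below is hypothetical; the real consumer is intel_overlay_recover_from_interrupt(), declared further down):

/* Illustrative only: what each recovery stage still has to do. */
static const char *overlay_stage_name(int hw_wedged)
{
	switch (hw_wedged) {
	case HW_WEDGED:
		return "hw wedged: give up until reset";
	case NEEDS_WAIT_FOR_FLIP:
		return "wait for the initial flip to complete";
	case RELEASE_OLD_VID:
		return "release the previous video buffer";
	case SWITCH_OFF_STAGE_1:
		return "overlay disabled, hw still on";
	case SWITCH_OFF_STAGE_2:
		return "hw off, unpin the remaining buffers";
	default:
		return "idle";
	}
}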
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -121,6 +147,8 @@ struct intel_crtc {
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
+ struct intel_overlay *overlay;
+ struct intel_unpin_work *unpin_work;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+void intel_i2c_reset_gmbus(struct drm_device *dev);
+
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern void intel_edp_link_config (struct intel_output *, int *, int *);
+extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
+extern void intel_init_clock_gating(struct drm_device *dev);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_framebuffer **fb,
struct drm_gem_object *obj);
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ int interruptible);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 2b0fe54cd92..371d753e362 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -70,7 +70,7 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
/**
- * Curretly it is assumed that the old framebuffer is reused.
+ * Currently it is assumed that the old framebuffer is reused.
*
* LOCKING
* caller should hold the mode config lock.
@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
par->intel_fb = intel_fb;
/* To allow resizeing without swapping buffers */
- DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
- intel_fb->base.height, obj_priv->gtt_offset, fbo);
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ intel_fb->base.width, intel_fb->base.height,
+ obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
{
int ret;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451aec1b..f04dbbe7d40 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_hdmi_enc_destroy,
};
-
+/*
+ * Enumerate the child device array parsed from the VBT to check whether
+ * the given HDMI output is present.
+ * If it is present, return 1; if it is not, return 0.
+ * If no child devices were parsed from the VBT, the given HDMI output is
+ * assumed to be present.
+ */
+static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, hdmi_port, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ if (hdmi_reg == SDVOB)
+ hdmi_port = DVO_B;
+ else if (hdmi_reg == SDVOC)
+ hdmi_port = DVO_C;
+ else if (hdmi_reg == HDMIB)
+ hdmi_port = DVO_B;
+ else if (hdmi_reg == HDMIC)
+ hdmi_port = DVO_C;
+ else if (hdmi_reg == HDMID)
+ hdmi_port = DVO_D;
+ else
+ return 0;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not HDMI, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_HDMI)
+ continue;
+ /* Find the HDMI port */
+ if (p_child->dvo_port == hdmi_port) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_output *intel_output;
struct intel_hdmi_priv *hdmi_priv;
+ if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
+ DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
+ return;
+ }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab724c41..8673c735b8a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
struct drm_i915_private *dev_priv = dev->dev_private;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_IGD(dev))
+ if (!IS_PINEVIEW(dev))
return;
if (enable)
I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high)
udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
}
+/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
+ * engine, but if the BIOS leaves it enabled, then that can break our use
+ * of the bit-banging I2C interfaces. This is notably the case with the
+ * Mac Mini in EFI mode.
+ */
+void
+intel_i2c_reset_gmbus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(PCH_GMBUS0, 0);
+ } else {
+ I915_WRITE(GMBUS0, 0);
+ }
+}
+
/**
* intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
if(i2c_bit_add_bus(&chan->adapter))
goto out_free;
+ intel_i2c_reset_gmbus(dev);
+
/* JJJ: raise SCL and SDA? */
intel_i2c_quirk_set(dev, true);
set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 05598ae10c4..3118ce274e6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl, reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_PCH_CTL2;
else
reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_status, ctl_reg, status_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
ctl_reg = PCH_PP_CONTROL;
status_reg = PCH_PP_STATUS;
} else {
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* full screen scale for now */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
goto out;
/* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
/*
@@ -899,7 +899,7 @@ static int intel_lid_present(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
- check_lid_device, &lid_present, NULL);
+ check_lid_device, NULL, &lid_present, NULL);
return lid_present;
}
@@ -914,6 +914,101 @@ static int intel_lid_present(void)
#endif
/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *scan, *panel_fixed_mode;
+ int temp_downclock;
+
+ panel_fixed_mode = dev_priv->panel_fixed_mode;
+ temp_downclock = panel_fixed_mode->clock;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If a probed mode has the same timings as the fixed panel
+		 * mode but a lower clock (i.e. a lower refresh rate), a
+		 * reduced downclock has been found for the LVDS. In that case
+		 * the FP0/FP1 dividers can be programmed differently to
+		 * select dynamically between the low and high frequencies.
+		 */
+ if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+ scan->hsync_start == panel_fixed_mode->hsync_start &&
+ scan->hsync_end == panel_fixed_mode->hsync_end &&
+ scan->htotal == panel_fixed_mode->htotal &&
+ scan->vdisplay == panel_fixed_mode->vdisplay &&
+ scan->vsync_start == panel_fixed_mode->vsync_start &&
+ scan->vsync_end == panel_fixed_mode->vsync_end &&
+ scan->vtotal == panel_fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+				/*
+				 * A downclock was already found; keep the
+				 * lowest one seen.
+				 */
+ temp_downclock = scan->clock;
+ }
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ if (temp_downclock < panel_fixed_mode->clock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+ "Normal clock %dKhz, downclock %dKhz\n",
+ panel_fixed_mode->clock, temp_downclock);
+ }
+ return;
+}
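The match in the loop above requires every timing field to agree before a lower clock counts as a downclock. A hedged, standalone restatement of that predicate (illustrative, not the driver's code):

static bool same_timings(const struct drm_display_mode *a,
			 const struct drm_display_mode *b)
{
	/* identical h/v timings: only the pixel clock may differ */
	return a->hdisplay == b->hdisplay &&
	       a->hsync_start == b->hsync_start &&
	       a->hsync_end == b->hsync_end &&
	       a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay &&
	       a->vsync_start == b->vsync_start &&
	       a->vsync_end == b->vsync_end &&
	       a->vtotal == b->vtotal;
}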
+
+/*
+ * Enumerate the child device array parsed from the VBT to check whether
+ * the LVDS is present.
+ * If it is present, return 1; if it is not, return 0.
+ * If no child devices were parsed from the VBT, the LVDS is assumed to
+ * be present.
+ * Note: the addin_offset must also be checked for an LVDS panel; the
+ * panel is regarded as present only when addin_offset is non-zero.
+ */
+static int lvds_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not LFP, continue.
+ * If the device type is 0x22, it is also regarded as LFP.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+ p_child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ /* The addin_offset should be checked. Only when it is
+ * non-zero, it is regarded as present.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
*
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /* Assume that any device without an ACPI LID device also doesn't
- * have an integrated LVDS. We would be better off parsing the BIOS
- * to get a reliable indicator, but that code isn't written yet.
- *
- * In the case of all-in-one desktops using LVDS that we've seen,
- * they're using SDVO LVDS.
+ /*
+ * Assume LVDS is present if there's an ACPI lid device or if the
+ * device is present in the VBT.
*/
- if (!intel_lid_present())
+ if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
return;
+ }
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
if (dev_priv->edp_support) {
- DRM_DEBUG("disable LVDS for eDP support\n");
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev)
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
mutex_unlock(&dev->mode_config.mutex);
+ intel_find_lvds_downclock(dev, connector);
goto out;
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev)
* correct mode.
*/
- /* IGDNG: FIXME if still fail, not try pipe mode now */
- if (IS_IGDNG(dev))
+	/* Ironlake: FIXME: if this still fails, don't try pipe mode for now */
+ if (IS_IRONLAKE(dev))
goto failed;
lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
u32 pwm;
/* make sure PWM is enabled */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@ out:
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
- DRM_DEBUG("lid notifier registration failed\n");
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@ failed:
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
kfree(intel_output);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 00000000000..2639591c72e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1416 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to the Intel docs the real limits
+ * are: Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But the Xorg ddx assumes 2048 for both width
+ * and height, so use the minimum of the two. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+#define CLK_RGB8I_MASK 0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
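The struct above is a byte-exact image of the overlay register file, with the RESERVED* members padding fields to the offsets annotated in the comments. A layout check in the spirit of the kernel's BUILD_BUG_ON, sketched here with C11 _Static_assert as an assumption-labeled illustration, could pin that down:

#include <stddef.h>

static void overlay_layout_check(void)
{
	_Static_assert(offsetof(struct overlay_registers, OSTART_0Y) == 0x70,
		       "reserved word at 0x6C must keep OSTART_0Y at 0x70");
	_Static_assert(offsetof(struct overlay_registers, FASTHSCALE) == 0xA0,
		       "tile offsets must end at 0x9C");
	_Static_assert(offsetof(struct overlay_registers, Y_VCOEFS) == 0x200,
		       "filter coefficients must start at 0x200");
}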
+
+#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+
+
+static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ /* no recursive mappings */
+ BUG_ON(overlay->virt_addr);
+
+ if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+
+ if (!regs) {
+ DRM_ERROR("failed to map overlay regs in GTT\n");
+ return NULL;
+ }
+	} else {
+		regs = overlay->reg_bo->phys_obj->handle->vaddr;
+	}
+
+ return overlay->virt_addr = regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (OVERLAY_NONPHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(overlay->virt_addr);
+
+ overlay->virt_addr = NULL;
+
+	I915_READ(OVADD); /* flush WC caches */
+
+ return;
+}
+
+/* overlay must be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BUG_ON(overlay->active);
+
+ overlay->active = 1;
+ overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ OUT_RING(overlay->flip_addr | OFC_UPDATE);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
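intel_overlay_on() above, and the helpers that follow, all share one emit-and-wait idiom: queue commands with the LP_RING macros, tag them with a GEM request, then block until the GPU retires it. A hedged sketch of that pattern factored out (the wrapper name is hypothetical):

static int overlay_wait_ring_idle(struct drm_device *dev,
				  struct intel_overlay *overlay)
{
	int ret;

	/* tag everything emitted so far with a request seqno */
	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
	if (overlay->last_flip_req == 0)
		return -ENOMEM;

	/* block (interruptibly) until the GPU retires the request */
	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
	if (ret == 0)
		overlay->last_flip_req = 0;
	return ret;
}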
+
+/* overlay needs to be enabled in OCMD reg */
+static void intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+ RING_LOCALS;
+
+ BUG_ON(!overlay->active);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = I915_READ(DOVSTA);
+ if (tmp & (1 << 17))
+ DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+}
+
+static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ u32 tmp;
+ RING_LOCALS;
+
+ if (overlay->last_flip_req != 0) {
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret == 0) {
+ overlay->last_flip_req = 0;
+
+ tmp = I915_READ(ISR);
+
+ if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+ return 0;
+ }
+ }
+
+ /* synchronous slowpath */
+ overlay->hw_wedged = RELEASE_OLD_VID;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BUG_ON(!overlay->active);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ /* wait for overlay to go idle */
+ overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ /* turn overlay off */
+ overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+	return 0;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_gem_object *obj;
+
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+ obj = overlay->vid_bo->obj;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->vid_bo = NULL;
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and to make forward
+ * progress. */
+int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ int interruptible)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ u32 flip_addr;
+ int ret;
+ RING_LOCALS;
+
+ if (overlay->hw_wedged == HW_WEDGED)
+ return -EIO;
+
+ if (overlay->last_flip_req == 0) {
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+ }
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+ if (ret != 0)
+ return ret;
+
+ switch (overlay->hw_wedged) {
+ case RELEASE_OLD_VID:
+ obj = overlay->old_vid_bo->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->old_vid_bo = NULL;
+ break;
+ case SWITCH_OFF_STAGE_1:
+ flip_addr = overlay->flip_addr;
+ flip_addr |= OFC_UPDATE;
+
+ overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ interruptible);
+ if (ret != 0)
+ return ret;
+
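+		/* fall through to the final switch-off stage */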
+ case SWITCH_OFF_STAGE_2:
+ intel_overlay_off_tail(overlay);
+ break;
+ default:
+ BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+ }
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Wait for a pending overlay flip and release the old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs_atomic */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ int ret;
+ struct drm_gem_object *obj;
+
+ /* only wait if there is actually an old frame to release to
+ * guarantee forward progress */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+ ret = intel_overlay_wait_flip(overlay);
+ if (ret != 0)
+ return ret;
+
+ obj = overlay->old_vid_bo->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->old_vid_bo = NULL;
+
+ return 0;
+}
+
+struct put_image_params {
+ int format;
+ short dst_x;
+ short dst_y;
+ short dst_w;
+ short dst_h;
+ short src_w;
+ short src_scan_h;
+ short src_scan_w;
+ short src_h;
+ short stride_Y;
+ short stride_UV;
+ int offset_Y;
+ int offset_U;
+ int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
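+
+/*
+ * Example: I915_OVERLAY_YUV420 gives hsub = 2 and vsub = 2, i.e. one
+ * U/V sample per 2x2 block of Y samples; YUV422 is 2x1, YUV411 4x1 and
+ * YUV410 4x2, matching the two tables above.
+ */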
+
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+ u32 mask, shift, ret;
+ if (IS_I9XX(dev)) {
+ mask = 0x3f;
+ shift = 6;
+ } else {
+ mask = 0x1f;
+ shift = 5;
+ }
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+ if (IS_I9XX(dev))
+ ret <<= 1;
+	ret -= 1;
+ return ret << 2;
+}
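+
+/*
+ * Worked example for calc_swidthsw() on i9xx (mask 0x3f, shift 6):
+ * offset = 0x20 and width = 1280 give
+ * ((0x20 + 1280 + 0x3f) >> 6) - (0x20 >> 6) = 21, doubled to 42,
+ * minus one is 41, and << 2 yields 0xa4.
+ */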
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+ 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+ 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+ 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+ 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+ 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+ 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+ 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+ 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+ 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+ 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+ 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+ 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+ 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+ 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+ 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+ 0x3000, 0x0800, 0x3000};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+ memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers *regs,
+ struct put_image_params *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+
+	if (params->dst_w > 1)
+		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+			/ params->dst_w;
+	else
+		xscale = 1 << FP_SHIFT;
+
+	if (params->dst_h > 1)
+		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+			/ params->dst_h;
+	else
+		yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+	/* make the Y scale to UV scale ratio an exact multiple */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+ regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+ | ((xscale >> FP_SHIFT) << 16)
+ | ((xscale & FRACT_MASK) << 3);
+ regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+ | ((xscale_UV >> FP_SHIFT) << 16)
+ | ((xscale_UV & FRACT_MASK) << 3);
+ regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+ | ((yscale_UV >> FP_SHIFT) << 0);
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
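+
+/*
+ * Fixed-point example: downscaling src_scan_w = 1280 onto dst_w = 640
+ * gives xscale = ((1280 - 1) << 12) / 640 = 8185 = 0x1ff9, i.e. about
+ * 1.998 in the 12-bit fixed-point format used above.
+ */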
+
+static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ u32 key = overlay->color_key;
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+	case 8:
+		regs->DCLRKV = 0;
+		regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+		break;
+	case 16:
+		if (overlay->crtc->base.fb->depth == 15) {
+			regs->DCLRKV = RGB15_TO_COLORKEY(key);
+			regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+		} else {
+			regs->DCLRKV = RGB16_TO_COLORKEY(key);
+			regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+		}
+		break;
+	case 24:
+	case 32:
+		regs->DCLRKV = key;
+		regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+		break;
+	}
+}
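+
+/*
+ * Example for the 15 bpp path: a colorkey of 0x7fff (5:5:5 all-ones)
+ * expands via RGB15_TO_COLORKEY to 0xf8f8f8, since each 5-bit channel
+ * is shifted into the top bits of its 8-bit slot.
+ */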
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_gem_object *new_bo,
+ struct put_image_params *params)
+{
+	int ret, tmp_width;
+	struct overlay_registers *regs;
+	bool scale_changed = false;
+	struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+	struct drm_device *dev;
+
+	BUG_ON(!overlay);
+	dev = overlay->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+ if (ret != 0)
+ goto out_unpin;
+
+ if (!overlay->active) {
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ if (IS_I965GM(overlay->dev))
+ regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+ regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+ regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+ if (params->format & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->format, params->src_w);
+ else
+ tmp_width = params->src_w;
+
+ regs->SWIDTH = params->src_w;
+ regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+ params->offset_Y, tmp_width);
+ regs->SHEIGHT = params->src_h;
+	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+ regs->OSTRIDE = params->stride_Y;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+ u32 tmp_U, tmp_V;
+ regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ params->src_w/uv_hscale);
+ regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+ regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+ regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OSTRIDE |= params->stride_UV << 16;
+ }
+
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ intel_overlay_continue(overlay, scale_changed);
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = new_bo->driver_private;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(new_bo);
+ return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ int ret;
+ struct overlay_registers *regs;
+ struct drm_device *dev = overlay->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ if (overlay->hw_wedged) {
+ ret = intel_overlay_recover_from_interrupt(overlay, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ regs->OCMD = 0;
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ ret = intel_overlay_off(overlay);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+
+ return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ u32 pipeconf;
+ int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+
+ if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ return -EINVAL;
+
+ pipeconf = I915_READ(pipeconf_reg);
+
+ /* can't use the overlay with double wide pipe */
+ if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ratio;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965 */
+ if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+ ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+ } else { /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ if (IS_I965G(dev))
+ ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ else
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+ if ((rec->dst_x < mode->crtc_hdisplay)
+ && (rec->dst_x + rec->dst_width
+ <= mode->crtc_hdisplay)
+ && (rec->dst_y < mode->crtc_vdisplay)
+ && (rec->dst_y + rec->dst_height
+ <= mode->crtc_vdisplay))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+ tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
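+
+/*
+ * Example: src_scan_h = 1600 onto dst_h = 200 computes
+ * ((1600 << 16) / 200) >> 16 = 8, which exceeds 7 and is rejected,
+ * while dst_h = 201 gives 7 and passes, keeping the downscale factor
+ * strictly below 8.0.
+ */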
+
+static int check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_gem_object *new_bo)
+{
+ u32 stride_mask;
+ int depth;
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ size_t tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+ || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT
+ || rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4
+ || rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+	/* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+ case I915_OVERLAY_YUV_PACKED:
+ depth = packed_depth_bytes(rec->flags);
+ if (uv_vscale != 1)
+ return -EINVAL;
+ if (depth < 0)
+ return depth;
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ stride_mask = 63;
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (IS_I965G(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4 : 8;
+ if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+		/* always 4 Y values per depth pixel */
+ if (packed_width_bytes(rec->flags, rec->src_width)
+ > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ tmp = rec->stride_UV*rec->src_height;
+ tmp /= uv_vscale;
+ if (rec->offset_U + tmp > new_bo->size
+ || rec->offset_V + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_put_image *put_image_rec = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+ struct drm_gem_object *new_bo;
+ struct put_image_params *params;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_overlay_switch_off(overlay);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+ }
+
+ params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+			DRM_MODE_OBJECT_CRTC);
+	if (!drmmode_obj) {
+		kfree(params);
+		return -ENOENT;
+	}
+	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+	new_bo = drm_gem_object_lookup(dev, file_priv,
+			put_image_rec->bo_handle);
+	if (!new_bo) {
+		kfree(params);
+		return -ENOENT;
+	}
+
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (overlay->hw_wedged) {
+ ret = intel_overlay_recover_from_interrupt(overlay, 1);
+ if (ret != 0)
+ goto out_unlock;
+ }
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ if (intel_panel_fitter_pipe(dev) == crtc->pipe
+	    /* and the line is too wide, i.e. one-line-mode */
+ && mode->hdisplay > 1024) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = 0;
+ }
+
+ ret = check_overlay_dst(overlay, put_image_rec);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+ }
+ params->dst_x = put_image_rec->dst_x;
+ params->dst_w = put_image_rec->dst_width;
+
+ params->src_w = put_image_rec->src_width;
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+ if (params->src_scan_h > params->src_h
+ || params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev, put_image_rec, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+ params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+ params->stride_Y = put_image_rec->stride_Y;
+ params->stride_UV = put_image_rec->stride_UV;
+ params->offset_Y = put_image_rec->offset_Y;
+ params->offset_U = put_image_rec->offset_U;
+ params->offset_V = put_image_rec->offset_V;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ kfree(params);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_gem_object_unreference(new_bo);
+ kfree(params);
+
+ return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+ regs->OCLRC1 = overlay->saturation;
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0)
+ || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+ || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+ || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+ || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+ || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+ || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+ return 0;
+}
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (IS_I9XX(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+ attrs->gamma3 = I915_READ(OGAMC3);
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+ ret = 0;
+ } else {
+ overlay->color_key = attrs->color_key;
+ if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+ overlay->brightness = attrs->brightness;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (attrs->contrast <= 255) {
+ overlay->contrast = attrs->contrast;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (attrs->saturation <= 1023) {
+ overlay->saturation = attrs->saturation;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (!IS_I9XX(dev)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret != 0)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+ I915_WRITE(OGAMC1, attrs->gamma1);
+ I915_WRITE(OGAMC2, attrs->gamma2);
+ I915_WRITE(OGAMC3, attrs->gamma3);
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_gem_object *reg_bo;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!OVERLAY_EXISTS(dev))
+ return;
+
+ overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ if (!overlay)
+ return;
+ overlay->dev = dev;
+
+ reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ if (!reg_bo)
+ goto out_free;
+ overlay->reg_bo = reg_bo->driver_private;
+
+ if (OVERLAY_NONPHYSICAL(dev)) {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ } else {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ }
+
+ /* init all values */
+ overlay->color_key = 0x0101fe;
+ overlay->brightness = -19;
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto out_free_bo;
+
+ memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ dev_priv->overlay = overlay;
+ DRM_INFO("initialized overlay support\n");
+ return;
+
+out_free_bo:
+ drm_gem_object_unreference(reg_bo);
+out_free:
+ kfree(overlay);
+ return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->overlay) {
+		/* The bos should already be freed by the generic code, and
+		 * modesetting teardown happens beforehand, so the hardware
+		 * should already be off. */
+ BUG_ON(dev_priv->overlay->active);
+
+ kfree(dev_priv->overlay);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 083bec2e50f..24a3dc99716 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,8 +36,6 @@
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#undef SDVO_DEBUG
-
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
-#ifdef SDVO_DEBUG
static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
void *args, int args_len)
{
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
DRM_LOG_KMS("(%02X)", cmd);
DRM_LOG_KMS("\n");
}
-#else
-#define intel_sdvo_debug_write(o, c, a, l)
-#endif
static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
void *args, int args_len)
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
}
-#ifdef SDVO_DEBUG
static const char *cmd_status_names[] = {
"Power on",
"Success",
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
DRM_LOG_KMS("(??? %d)", status);
DRM_LOG_KMS("\n");
}
-#else
-#define intel_sdvo_debug_response(o, r, l, s)
-#endif
static u8 intel_sdvo_read_response(struct intel_output *intel_output,
void *response, int response_len)
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
intel_sdvo_write_cmd(intel_output,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ if (sdvo_priv->is_tv) {
+ /* add 30ms delay when the output type is SDVO-TV */
+ mdelay(30);
+ }
status = intel_sdvo_read_response(intel_output, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
@@ -2726,7 +2720,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
/* Wrap with our custom algo which switches to DDC mode */
intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
- /* In defaut case sdvo lvds is false */
+ /* In default case sdvo lvds is false */
intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
if (intel_sdvo_output_setup(intel_output,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca917931af..552ec110b74 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl |= TV_TRILEVEL_SYNC;
if (tv_mode->pal_burst)
tv_ctl |= TV_PAL_BURST;
+
scctl1 = 0;
- /* dda1 implies valid video levels */
- if (tv_mode->dda1_inc) {
+ if (tv_mode->dda1_inc)
scctl1 |= TV_SC_DDA1_EN;
- }
-
if (tv_mode->dda2_inc)
scctl1 |= TV_SC_DDA2_EN;
-
if (tv_mode->dda3_inc)
scctl1 |= TV_SC_DDA3_EN;
-
scctl1 |= tv_mode->sc_reset;
- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG("Detected Composite TV connection\n");
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG("Detected S-Video TV connection\n");
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG("Detected Component TV connection\n");
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG("No TV connection detected\n");
+ DRM_DEBUG_KMS("No TV connection detected\n");
type = -1;
}
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_tv_enc_destroy,
};
+/*
+ * Enumerate the child device array parsed from the VBT to check whether
+ * the integrated TV is present.  Returns 1 if it is present and 0 if it
+ * is not.  If no child devices were parsed from the VBT, the TV is
+ * assumed to be present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+ p_child->device_type != DEVICE_TYPE_TV)
+ continue;
+		/* The TV is only regarded as present when the addin_offset
+		 * is non-zero.
+		 */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void
intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
+ if (!tv_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ return;
+ }
/* Even if we have an encoder we may not have a connector */
if (!dev_priv->int_tv_support)
return;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 00000000000..d823e631951
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,44 @@
+config DRM_NOUVEAU
+ tristate "Nouveau (nVidia) cards"
+ depends on DRM
+ select FW_LOADER
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB
+ select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ help
+ Choose this option for open-source nVidia support.
+
+config DRM_NOUVEAU_BACKLIGHT
+ bool "Support for backlight control"
+ depends on DRM_NOUVEAU
+ default y
+ help
+ Say Y here if you want to control the backlight of your display
+ (e.g. a laptop panel).
+
+config DRM_NOUVEAU_DEBUG
+ bool "Build in Nouveau's debugfs support"
+ depends on DRM_NOUVEAU && DEBUG_FS
+ default y
+ help
+ Say Y here if you want Nouveau to output debugging information
+ via debugfs.
+
+menu "I2C encoder or helper chips"
+ depends on DRM
+
+config DRM_I2C_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ default m if DRM_NOUVEAU
+ help
+ Support for Chrontel ch7006 and similar TV encoders, found
+ on some nVidia video cards.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 00000000000..1d90d4d0144
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,31 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
+ nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+ nouveau_sgdma.o nouveau_dma.o \
+ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
+ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
+ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+ nouveau_dp.o \
+ nv04_timer.o \
+ nv04_mc.o nv40_mc.o nv50_mc.o \
+ nv04_fb.o nv10_fb.o nv40_fb.o \
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv04_graph.o nv10_graph.o nv20_graph.o \
+ nv40_graph.o nv50_graph.o \
+ nv04_instmem.o nv50_instmem.o \
+ nv50_crtc.o nv50_dac.o nv50_sor.o \
+ nv50_cursor.o nv50_display.o nv50_fbcon.o \
+ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
+ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+ nv17_gpio.o
+
+nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 00000000000..1cf488247a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,125 @@
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nv50_display.h"
+
+#define NOUVEAU_DSM_SUPPORTED 0x00
+#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
+
+#define NOUVEAU_DSM_ACTIVE 0x01
+#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
+
+#define NOUVEAU_DSM_LED 0x02
+#define NOUVEAU_DSM_LED_STATE 0x00
+#define NOUVEAU_DSM_LED_OFF 0x10
+#define NOUVEAU_DSM_LED_STAMINA 0x11
+#define NOUVEAU_DSM_LED_SPEED 0x12
+
+#define NOUVEAU_DSM_POWER 0x03
+#define NOUVEAU_DSM_POWER_STATE 0x00
+#define NOUVEAU_DSM_POWER_SPEED 0x01
+#define NOUVEAU_DSM_POWER_STAMINA 0x02
+
+static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
+{
+ static char muid[] = {
+ 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+ };
+
+ struct pci_dev *pdev = dev->pdev;
+ struct acpi_handle *handle;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *obj;
+ int err;
+
+ handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+
+ if (!handle)
+ return -ENODEV;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(muid);
+ params[0].buffer.pointer = (char *)muid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 0x00000102;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = arg;
+
+ err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+ if (err) {
+ NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+ return err;
+ }
+
+ obj = (union acpi_object *)output.pointer;
+
+	if (obj->type == ACPI_TYPE_INTEGER)
+		if (obj->integer.value == 0x80000002) {
+			kfree(output.pointer);
+			return -ENODEV;
+		}
+
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (obj->buffer.length == 4 && result) {
+ *result = 0;
+ *result |= obj->buffer.pointer[0];
+ *result |= (obj->buffer.pointer[1] << 8);
+ *result |= (obj->buffer.pointer[2] << 16);
+ *result |= (obj->buffer.pointer[3] << 24);
+ }
+ }
+
+ kfree(output.pointer);
+ return 0;
+}
+
+int nouveau_hybrid_setup(struct drm_device *dev)
+{
+ int result;
+
+ if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+ &result))
+ return -ENODEV;
+
+ NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
+
+ if (result & 0x1) { /* Stamina mode - disable the external GPU */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
+ NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
+ NULL);
+ } else { /* Ensure that the external GPU is enabled */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+ NULL);
+ }
+
+ return 0;
+}
+
+bool nouveau_dsm_probe(struct drm_device *dev)
+{
+ int support = 0;
+
+ if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+ return false;
+
+ if (!support)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 00000000000..20564f8cb0e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Matthew Garrett <mjg@redhat.com>
+ *
+ * Register locations derived from NVClock by Roderick Colenbrander
+ */
+
+#include <linux/backlight.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+
+static int nv40_get_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
+ >> 16;
+
+ return val;
+}
+
+static int nv40_set_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = bd->props.brightness;
+ int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
+
+ nv_wr32(dev, NV40_PMC_BACKLIGHT,
+ (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
+
+ return 0;
+}
+
+static struct backlight_ops nv40_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nv40_get_intensity,
+ .update_status = nv40_set_intensity,
+};
+
+static int nv50_get_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+
+ return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
+}
+
+static int nv50_set_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = bd->props.brightness;
+
+ nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
+ val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
+ return 0;
+}
+
+static struct backlight_ops nv50_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nv50_get_intensity,
+ .update_status = nv50_set_intensity,
+};
+
+static int nouveau_nv40_backlight_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_device *bd;
+
+ if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+ return 0;
+
+ bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ &nv40_bl_ops);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ dev_priv->backlight = bd;
+ bd->props.max_brightness = 31;
+ bd->props.brightness = nv40_get_intensity(bd);
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static int nouveau_nv50_backlight_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_device *bd;
+
+ if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
+ return 0;
+
+ bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ &nv50_bl_ops);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ dev_priv->backlight = bd;
+ bd->props.max_brightness = 1025;
+ bd->props.brightness = nv50_get_intensity(bd);
+ backlight_update_status(bd);
+ return 0;
+}
+
+int nouveau_backlight_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->card_type) {
+ case NV_40:
+ return nouveau_nv40_backlight_init(dev);
+ case NV_50:
+ return nouveau_nv50_backlight_init(dev);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void nouveau_backlight_exit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight) {
+ backlight_device_unregister(dev_priv->backlight);
+ dev_priv->backlight = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 00000000000..5eec5ed6948
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,6095 @@
+/*
+ * Copyright 2005-2006 Erik Waling
+ * Copyright 2006 Stephane Marchesin
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#define NV_DEBUG_NOTRACE
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+/* these defines are made up */
+#define NV_CIO_CRE_44_HEADA 0x0
+#define NV_CIO_CRE_44_HEADB 0x3
+#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
+#define LEGACY_I2C_CRT 0x80
+#define LEGACY_I2C_PANEL 0x81
+#define LEGACY_I2C_TV 0x82
+
+#define EDID1_LEN 128
+
+#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
+#define LOG_OLD_VALUE(x)
+
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+
+struct init_exec {
+ bool execute;
+ bool repeat;
+};
+
+static bool nv_cksum(const uint8_t *data, unsigned int length)
+{
+	/*
+	 * There are a few checksums in the BIOS, so here's a generic checking
+	 * function.  Returns true if the byte sum over 'length' is non-zero,
+	 * i.e. if the checksum is invalid.
+	 */
+ int i;
+ uint8_t sum = 0;
+
+ for (i = 0; i < length; i++)
+ sum += data[i];
+
+ if (sum)
+ return true;
+
+ return false;
+}
+
+static int
+score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
+{
+ if (!(data[0] == 0x55 && data[1] == 0xAA)) {
+ NV_TRACEWARN(dev, "... BIOS signature not found\n");
+ return 0;
+ }
+
+ if (nv_cksum(data, data[2] * 512)) {
+ NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
+ /* if a ro image is somewhat bad, it's probably all rubbish */
+ return writeable ? 2 : 1;
+ } else
+ NV_TRACE(dev, "... appears to be valid\n");
+
+ return 3;
+}
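+
+/*
+ * Score summary: 0 means no 0x55aa signature, 1 a read-only image with
+ * a bad checksum, 2 a writeable image with a bad checksum (it may
+ * simply not have been shadowed yet), 3 a fully valid image.
+ * NVShadowVBIOS() below tries each method in turn and settles for the
+ * best score seen.
+ */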
+
+static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pci_nv_20, save_pci_nv_20;
+ int pcir_ptr;
+ int i;
+
+ if (dev_priv->card_type >= NV_50)
+ pci_nv_20 = 0x88050;
+ else
+ pci_nv_20 = NV_PBUS_PCI_NV_20;
+
+ /* enable ROM access */
+ save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
+ nvWriteMC(dev, pci_nv_20,
+ save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+
+ /* bail if no rom signature */
+ if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
+ nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
+ goto out;
+
+ /* additional check (see note below) - read PCI record header */
+ pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
+ nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
+ if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
+ goto out;
+
+	/* on some 6600GT/6800LE boards PROM reads are messed up.  nvclock
+	 * alleges a good read may be obtained by waiting or re-reading
+	 * (cargocult: 5x) each byte.  we'll hope pramin has something usable
+	 * instead
+	 */
+ for (i = 0; i < NV_PROM_SIZE; i++)
+ data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
+
+out:
+ /* disable ROM access */
+ nvWriteMC(dev, pci_nv_20,
+ save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+}
+
+static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t old_bar0_pramin = 0;
+ int i;
+
+ if (dev_priv->card_type >= NV_50) {
+ uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
+
+ if (!vbios_vram)
+ vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
+
+ old_bar0_pramin = nv_rd32(dev, 0x1700);
+ nv_wr32(dev, 0x1700, vbios_vram >> 16);
+ }
+
+ /* bail if no rom signature */
+ if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
+ nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
+ goto out;
+
+ for (i = 0; i < NV_PROM_SIZE; i++)
+ data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
+
+out:
+ if (dev_priv->card_type >= NV_50)
+ nv_wr32(dev, 0x1700, old_bar0_pramin);
+}
+
+static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
+{
+ void __iomem *rom = NULL;
+ size_t rom_len;
+ int ret;
+
+ ret = pci_enable_rom(dev->pdev);
+ if (ret)
+ return;
+
+ rom = pci_map_rom(dev->pdev, &rom_len);
+ if (!rom)
+ goto out;
+ memcpy_fromio(data, rom, rom_len);
+ pci_unmap_rom(dev->pdev, rom);
+
+out:
+ pci_disable_rom(dev->pdev);
+}
+
+struct methods {
+ const char desc[8];
+ void (*loadbios)(struct drm_device *, uint8_t *);
+ const bool rw;
+ int score;
+};
+
+static struct methods nv04_methods[] = {
+ { "PROM", load_vbios_prom, false },
+ { "PRAMIN", load_vbios_pramin, true },
+ { "PCIROM", load_vbios_pci, true },
+ { }
+};
+
+static struct methods nv50_methods[] = {
+ { "PRAMIN", load_vbios_pramin, true },
+ { "PROM", load_vbios_prom, false },
+ { "PCIROM", load_vbios_pci, true },
+ { }
+};
+
+static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct methods *methods, *method;
+ int testscore = 3;
+
+ if (nouveau_vbios) {
+ method = nv04_methods;
+ while (method->loadbios) {
+ if (!strcasecmp(nouveau_vbios, method->desc))
+ break;
+ method++;
+ }
+
+ if (method->loadbios) {
+ NV_INFO(dev, "Attempting to use BIOS image from %s\n",
+ method->desc);
+
+ method->loadbios(dev, data);
+ if (score_vbios(dev, data, method->rw))
+ return true;
+ }
+
+ NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
+ }
+
+ if (dev_priv->card_type < NV_50)
+ methods = nv04_methods;
+ else
+ methods = nv50_methods;
+
+ method = methods;
+ while (method->loadbios) {
+ NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
+ method->desc);
+ data[0] = data[1] = 0; /* avoid reuse of previous image */
+ method->loadbios(dev, data);
+ method->score = score_vbios(dev, data, method->rw);
+ if (method->score == testscore)
+ return true;
+ method++;
+ }
+
+ while (--testscore > 0) {
+ method = methods;
+ while (method->loadbios) {
+ if (method->score == testscore) {
+ NV_TRACE(dev, "Using BIOS image from %s\n",
+ method->desc);
+ method->loadbios(dev, data);
+ return true;
+ }
+ method++;
+ }
+ }
+
+ NV_ERROR(dev, "No valid BIOS image found\n");
+ return false;
+}
+
+struct init_tbl_entry {
+ char *name;
+ uint8_t id;
+ int length;
+ int length_offset;
+ int length_multiplier;
+ bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
+};
+
+struct bit_entry {
+ uint8_t id[2];
+ uint16_t length;
+ uint16_t offset;
+};
+
+static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+
+#define MACRO_INDEX_SIZE 2
+#define MACRO_SIZE 8
+#define CONDITION_SIZE 12
+#define IO_FLAG_CONDITION_SIZE 9
+#define IO_CONDITION_SIZE 5
+#define MEM_INIT_SIZE 66
+
+static void still_alive(void)
+{
+#if 0
+ sync();
+ msleep(2);
+#endif
+}
+
+static uint32_t
+munge_reg(struct nvbios *bios, uint32_t reg)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct dcb_entry *dcbent = bios->display.output;
+
+ if (dev_priv->card_type < NV_50)
+ return reg;
+
+ if (reg & 0x40000000) {
+ BUG_ON(!dcbent);
+
+ reg += (ffs(dcbent->or) - 1) * 0x800;
+ if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
+ reg += 0x00000080;
+ }
+
+ reg &= ~0x60000000;
+ return reg;
+}
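+
+/*
+ * munge_reg() note: on NV50, script registers with bit 30 set are
+ * relative to the output resource (OR) in use.  Each OR's register
+ * block is 0x800 apart, with a further 0x80 offset for the second SOR
+ * link, and bits 29:30 are stripped before the access.
+ */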
+
+static int
+valid_reg(struct nvbios *bios, uint32_t reg)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+
+ /* C51 has misaligned regs on purpose. Marvellous */
+ if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
+ NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
+ reg);
+ return 0;
+ }
+ /*
+ * Warn on C51 regs that have not been verified accessible in
+ * mmiotracing
+ */
+ if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
+ NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
+ reg);
+
+ /* Trust the init scripts on G80 */
+ if (dev_priv->card_type >= NV_50)
+ return 1;
+
+ #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
+ if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
+ (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
+ WITHIN(reg, 0xc000, 0x48))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
+ if (reg == 0x00011014 || reg == 0x00020328)
+ return 1;
+ if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
+ return 1;
+ }
+ if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
+ return 1;
+ if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
+ return 1;
+ #undef WITHIN
+
+ NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
+
+ return 0;
+}
+
+static bool
+valid_idx_port(struct nvbios *bios, uint16_t port)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+
+ /*
+ * If adding more ports here, the read/write functions below will need
+ * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
+ * used for the port in question
+ */
+ if (dev_priv->card_type < NV_50) {
+ if (port == NV_CIO_CRX__COLOR)
+ return true;
+ if (port == NV_VIO_SRX)
+ return true;
+ } else {
+ if (port == NV_CIO_CRX__COLOR)
+ return true;
+ }
+
+ NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
+ port);
+
+ return false;
+}
+
+static bool
+valid_port(struct nvbios *bios, uint16_t port)
+{
+ struct drm_device *dev = bios->dev;
+
+ /*
+ * If adding more ports here, the read/write functions below will need
+ * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
+ * used for the port in question
+ */
+ if (port == NV_VIO_VSE2)
+ return true;
+
+ NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
+
+ return false;
+}
+
+static uint32_t
+bios_rd32(struct nvbios *bios, uint32_t reg)
+{
+ uint32_t data;
+
+ reg = munge_reg(bios, reg);
+ if (!valid_reg(bios, reg))
+ return 0;
+
+ /*
+ * C51 sometimes uses regs with bit0 set in the address. For these
+ * cases there should exist a translation in a BIOS table to an IO
+ * port address which the BIOS uses for accessing the reg
+ *
+ * These only seem to appear for the power control regs to a flat panel,
+ * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
+ * for 0x1308 and 0x1310 are used - hence the mask below. An S3
+ * suspend-resume mmio trace from a C51 will be required to see if this
+ * is true for the power microcode in 0x14.., or whether the direct IO
+ * port access method is needed
+ */
+ if (reg & 0x1)
+ reg &= ~0x1;
+
+ data = nv_rd32(bios->dev, reg);
+
+ BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
+
+ return data;
+}
+
+static void
+bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+
+ reg = munge_reg(bios, reg);
+ if (!valid_reg(bios, reg))
+ return;
+
+ /* see note in bios_rd32 */
+ if (reg & 0x1)
+		reg &= ~0x1;
+
+ LOG_OLD_VALUE(bios_rd32(bios, reg));
+ BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
+
+ if (dev_priv->VBIOS.execute) {
+ still_alive();
+ nv_wr32(bios->dev, reg, data);
+ }
+}
+
+static uint8_t
+bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+ uint8_t data;
+
+ if (!valid_idx_port(bios, port))
+ return 0;
+
+ if (dev_priv->card_type < NV_50) {
+ if (port == NV_VIO_SRX)
+ data = NVReadVgaSeq(dev, bios->state.crtchead, index);
+ else /* assume NV_CIO_CRX__COLOR */
+ data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
+ } else {
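+		/* G80+ packs the VGA CRTC registers four to a 32-bit word */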
+ uint32_t data32;
+
+ data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
+ data = (data32 >> ((index & 3) << 3)) & 0xff;
+ }
+
+ BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
+ "Head: 0x%02X, Data: 0x%02X\n",
+ port, index, bios->state.crtchead, data);
+ return data;
+}
+
+static void
+bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+
+ if (!valid_idx_port(bios, port))
+ return;
+
+ /*
+ * The current head is maintained in the nvbios member state.crtchead.
+ * We trap changes to CR44 and update the head variable and hence the
+ * register set written.
+ * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
+ * of the write, and to head1 after the write
+ */
+ if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
+ data != NV_CIO_CRE_44_HEADB)
+ bios->state.crtchead = 0;
+
+ LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
+ BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
+ "Head: 0x%02X, Data: 0x%02X\n",
+ port, index, bios->state.crtchead, data);
+
+ if (bios->execute && dev_priv->card_type < NV_50) {
+ still_alive();
+ if (port == NV_VIO_SRX)
+ NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
+ else /* assume NV_CIO_CRX__COLOR */
+ NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
+ } else
+ if (bios->execute) {
+ uint32_t data32, shift = (index & 3) << 3;
+
+ still_alive();
+
+ data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
+ data32 &= ~(0xff << shift);
+ data32 |= (data << shift);
+ bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
+ }
+
+ if (port == NV_CIO_CRX__COLOR &&
+ index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
+ bios->state.crtchead = 1;
+}
+
+static uint8_t
+bios_port_rd(struct nvbios *bios, uint16_t port)
+{
+ uint8_t data, head = bios->state.crtchead;
+
+ if (!valid_port(bios, port))
+ return 0;
+
+ data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
+
+ BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
+ port, head, data);
+
+ return data;
+}
+
+static void
+bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
+{
+ int head = bios->state.crtchead;
+
+ if (!valid_port(bios, port))
+ return;
+
+ LOG_OLD_VALUE(bios_port_rd(bios, port));
+ BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
+ port, head, data);
+
+ if (!bios->execute)
+ return;
+
+ still_alive();
+ NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
+}
+
+static bool
+io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+ /*
+ * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
+ * for the CRTC index; 1 byte for the mask to apply to the value
+ * retrieved from the CRTC; 1 byte for the shift right to apply to the
+ * masked CRTC value; 2 bytes for the offset to the flag array, to
+ * which the shifted value is added; 1 byte for the mask applied to the
+ * value read from the flag array; and 1 byte for the value to compare
+ * against the masked byte from the flag table.
+ */
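+	/*
+	 * Worked example (hypothetical values): with mask 0x0c and shift 2, a
+	 * CRTC read of 0x08 gives flag array index 2; the byte at
+	 * flagarray + 2, ANDed with flagarraymask, is compared with cmpval
+	 */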
+
+ uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
+ uint16_t crtcport = ROM16(bios->data[condptr]);
+ uint8_t crtcindex = bios->data[condptr + 2];
+ uint8_t mask = bios->data[condptr + 3];
+ uint8_t shift = bios->data[condptr + 4];
+ uint16_t flagarray = ROM16(bios->data[condptr + 5]);
+ uint8_t flagarraymask = bios->data[condptr + 7];
+ uint8_t cmpval = bios->data[condptr + 8];
+ uint8_t data;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
+ "Cmpval: 0x%02X\n",
+ offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
+
+ data = bios_idxprt_rd(bios, crtcport, crtcindex);
+
+ data = bios->data[flagarray + ((data & mask) >> shift)];
+ data &= flagarraymask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
+ offset, data, cmpval);
+
+ return (data == cmpval);
+}
+
+static bool
+bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+ /*
+ * The condition table entry has 4 bytes for the address of the
+ * register to check, 4 bytes for a mask to apply to the register and
+ * 4 for a test comparison value
+ */
+
+ uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
+ uint32_t reg = ROM32(bios->data[condptr]);
+ uint32_t mask = ROM32(bios->data[condptr + 4]);
+ uint32_t cmpval = ROM32(bios->data[condptr + 8]);
+ uint32_t data;
+
+ BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
+ offset, cond, reg, mask);
+
+ data = bios_rd32(bios, reg) & mask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
+ offset, data, cmpval);
+
+ return (data == cmpval);
+}
+
+static bool
+io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+ /*
+ * The IO condition entry has 2 bytes for the IO port address; 1 byte
+ * for the index to write to io_port; 1 byte for the mask to apply to
+ * the byte read from io_port+1; and 1 byte for the value to compare
+ * against the masked byte.
+ */
+
+ uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
+ uint16_t io_port = ROM16(bios->data[condptr]);
+ uint8_t port_index = bios->data[condptr + 2];
+ uint8_t mask = bios->data[condptr + 3];
+ uint8_t cmpval = bios->data[condptr + 4];
+
+ uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
+ offset, data, cmpval);
+
+ return (data == cmpval);
+}
+
+static int
+nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t reg0 = nv_rd32(dev, reg + 0);
+ uint32_t reg1 = nv_rd32(dev, reg + 4);
+ struct nouveau_pll_vals pll;
+ struct pll_lims pll_limits;
+ int ret;
+
+ ret = get_pll_limits(dev, reg, &pll_limits);
+ if (ret)
+ return ret;
+
+ clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
+ if (!clk)
+ return -ERANGE;
+
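+	/* reg + 0 holds log2P in bits 16:18; reg + 4 holds N1 and M1 */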
+ reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
+ reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
+
+ if (dev_priv->VBIOS.execute) {
+ still_alive();
+ nv_wr32(dev, reg + 4, reg1);
+ nv_wr32(dev, reg + 0, reg0);
+ }
+
+ return 0;
+}
+
+static int
+setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
+{
+ struct drm_device *dev = bios->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ /* clk in kHz */
+ struct pll_lims pll_lim;
+ struct nouveau_pll_vals pllvals;
+ int ret;
+
+ if (dev_priv->card_type >= NV_50)
+ return nv50_pll_set(dev, reg, clk);
+
+	/* high regs (such as in the mac g5 table) are not adjusted by -4 */
+ ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
+ if (ret)
+ return ret;
+
+ clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
+ if (!clk)
+ return -ERANGE;
+
+ if (bios->execute) {
+ still_alive();
+ nouveau_hw_setpll(dev, reg, &pllvals);
+ }
+
+ return 0;
+}
+
+static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+ /*
+ * For the results of this function to be correct, CR44 must have been
+ * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
+ * and the DCB table parsed, before the script calling the function is
+	 * run. run_digital_op_script is an example of how to do such setup
+ */
+
+ uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
+
+ if (dcb_entry > bios->bdcb.dcb.entries) {
+ NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
+ "(%02X)\n", dcb_entry);
+ dcb_entry = 0x7f; /* unused / invalid marker */
+ }
+
+ return dcb_entry;
+}
+
+static struct nouveau_i2c_chan *
+init_i2c_device_find(struct drm_device *dev, int i2c_index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+
+ if (i2c_index == 0xff) {
+ /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+ int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
+ int default_indices = bdcb->i2c_default_indices;
+
+ if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+ shift = 4;
+
+ i2c_index = (default_indices >> shift) & 0xf;
+ }
+ if (i2c_index == 0x80) /* g80+ */
+ i2c_index = bdcb->i2c_default_indices & 0xf;
+
+ return nouveau_i2c_find(dev, i2c_index);
+}
+
+static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+{
+ /*
+ * For mlv < 0x80, it is an index into a table of TMDS base addresses.
+ * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
+ * CR58 for CR57 = 0 to index a table of offsets to the basic
+ * 0x6808b0 address.
+ * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
+ * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address, and then XOR the offset with 8.
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const int pramdac_offset[13] = {
+ 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
+ const uint32_t pramdac_table[4] = {
+ 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
+
+ if (mlv >= 0x80) {
+ int dcb_entry, dacoffset;
+
+ /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+ dcb_entry = dcb_entry_idx_from_crtchead(dev);
+ if (dcb_entry == 0x7f)
+ return 0;
+ dacoffset = pramdac_offset[
+ dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
+ if (mlv == 0x81)
+ dacoffset ^= 8;
+ return 0x6808b0 + dacoffset;
+ } else {
+		if (mlv >= ARRAY_SIZE(pramdac_table)) {
+ NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
+ mlv);
+ return 0;
+ }
+ return pramdac_table[mlv];
+ }
+}
+
+static bool
+init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): count
+ * offset + 7 (32 bit): register
+ * offset + 11 (32 bit): configuration 1
+ * ...
+ *
+ * Starting at offset + 11 there are "count" 32 bit values.
+ * To find out which value to use read index "CRTC index" on "CRTC
+ * port", AND this value with "mask" and then bit shift right "shift"
+ * bits. Read the appropriate value using this index and write to
+ * "register"
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t shift = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 6];
+ uint32_t reg = ROM32(bios->data[offset + 7]);
+ uint8_t config;
+ uint32_t configval;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
+ offset, crtcport, crtcindex, mask, shift, count, reg);
+
+ config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+ if (config > count) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+ offset, config, count);
+ return false;
+ }
+
+ configval = ROM32(bios->data[offset + 11 + config * 4]);
+
+ BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
+
+ bios_wr32(bios, reg, configval);
+
+ return true;
+}
+
+static bool
+init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_REPEAT opcode: 0x33 ('3')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): count
+ *
+ * Execute script following this opcode up to INIT_REPEAT_END
+ * "count" times
+ */
+
+ uint8_t count = bios->data[offset + 1];
+ uint8_t i;
+
+ /* no iexec->execute check by design */
+
+ BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
+ offset, count);
+
+ iexec->repeat = true;
+
+ /*
+ * count - 1, as the script block will execute once when we leave this
+ * opcode -- this is compatible with bios behaviour as:
+ * a) the block is always executed at least once, even if count == 0
+ * b) the bios interpreter skips to the op following INIT_END_REPEAT,
+ * while we don't
+ */
+ for (i = 0; i < count - 1; i++)
+ parse_init_table(bios, offset + 2, iexec);
+
+ iexec->repeat = false;
+
+ return true;
+}
+
+static bool
+init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): IO flag condition index
+ * offset + 7 (8 bit): count
+ * offset + 8 (32 bit): register
+ * offset + 12 (16 bit): frequency 1
+ * ...
+ *
+ * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
+ * Set PLL register "register" to coefficients for frequency n,
+ * selected by reading index "CRTC index" of "CRTC port" ANDed with
+ * "mask" and shifted right by "shift".
+ *
+ * If "IO flag condition index" > 0, and condition met, double
+ * frequency before setting it.
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t shift = bios->data[offset + 5];
+ int8_t io_flag_condition_idx = bios->data[offset + 6];
+ uint8_t count = bios->data[offset + 7];
+ uint32_t reg = ROM32(bios->data[offset + 8]);
+ uint8_t config;
+ uint16_t freq;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
+ "Count: 0x%02X, Reg: 0x%08X\n",
+ offset, crtcport, crtcindex, mask, shift,
+ io_flag_condition_idx, count, reg);
+
+ config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+ if (config > count) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+ offset, config, count);
+ return false;
+ }
+
+ freq = ROM16(bios->data[offset + 12 + config * 2]);
+
+ if (io_flag_condition_idx > 0) {
+ if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
+ "frequency doubled\n", offset);
+ freq *= 2;
+ } else
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
+ "frequency unchanged\n", offset);
+ }
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
+ offset, reg, config, freq);
+
+ setPLL(bios, reg, freq * 10);
+
+ return true;
+}
+
+static bool
+init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_END_REPEAT opcode: 0x36 ('6')
+ *
+ * offset (8 bit): opcode
+ *
+ * Marks the end of the block for INIT_REPEAT to repeat
+ */
+
+ /* no iexec->execute check by design */
+
+ /*
+ * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
+ * we're not in repeat mode
+ */
+ if (iexec->repeat)
+ return false;
+
+ return true;
+}
+
+static bool
+init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COPY opcode: 0x37 ('7')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): srcmask
+ * offset + 7 (16 bit): CRTC port
+ * offset + 9 (8 bit): CRTC index
+ * offset + 10 (8 bit): mask
+ *
+ * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
+ * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
+ * port
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint8_t shift = bios->data[offset + 5];
+ uint8_t srcmask = bios->data[offset + 6];
+ uint16_t crtcport = ROM16(bios->data[offset + 7]);
+ uint8_t crtcindex = bios->data[offset + 9];
+ uint8_t mask = bios->data[offset + 10];
+ uint32_t data;
+ uint8_t crtcdata;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
+ "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
+ offset, reg, shift, srcmask, crtcport, crtcindex, mask);
+
+ data = bios_rd32(bios, reg);
+
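+	/* shifts of 0x80 and above encode a left shift by (0x100 - shift) */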
+ if (shift < 0x80)
+ data >>= shift;
+ else
+ data <<= (0x100 - shift);
+
+ data &= srcmask;
+
+ crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
+ crtcdata |= (uint8_t)data;
+ bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
+
+ return true;
+}
+
+static bool
+init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_NOT opcode: 0x38 ('8')
+ *
+ * offset (8 bit): opcode
+ *
+ * Invert the current execute / no-execute condition (i.e. "else")
+ */
+ if (iexec->execute)
+ BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
+ else
+ BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
+
+ iexec->execute = !iexec->execute;
+ return true;
+}
+
+static bool
+init_io_flag_condition(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ *
+ * Check condition "condition number" in the IO flag condition table.
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return true;
+
+ if (io_flag_condition_met(bios, offset, cond))
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return true;
+}
+
+static bool
+init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): control register
+ * offset + 5 (32 bit): data register
+ * offset + 9 (32 bit): mask
+ * offset + 13 (32 bit): data
+ * offset + 17 (8 bit): count
+ * offset + 18 (8 bit): address 1
+ * offset + 19 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" address and data pairs, write "data n" to
+ * "data register", read the current value of "control register",
+ * and write it back once ANDed with "mask", ORed with "data",
+ * and ORed with "address n"
+ */
+
+ uint32_t controlreg = ROM32(bios->data[offset + 1]);
+ uint32_t datareg = ROM32(bios->data[offset + 5]);
+ uint32_t mask = ROM32(bios->data[offset + 9]);
+ uint32_t data = ROM32(bios->data[offset + 13]);
+ uint8_t count = bios->data[offset + 17];
+ uint32_t value;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
+ "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
+ offset, controlreg, datareg, mask, data, count);
+
+ for (i = 0; i < count; i++) {
+ uint8_t instaddress = bios->data[offset + 18 + i * 2];
+ uint8_t instdata = bios->data[offset + 19 + i * 2];
+
+ BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
+ offset, instaddress, instdata);
+
+ bios_wr32(bios, datareg, instdata);
+ value = bios_rd32(bios, controlreg) & mask;
+ value |= data;
+ value |= instaddress;
+ bios_wr32(bios, controlreg, value);
+ }
+
+ return true;
+}
+
+static bool
+init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): count
+ * offset + 7 (32 bit): register
+ * offset + 11 (32 bit): frequency 1
+ * ...
+ *
+ * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
+ * Set PLL register "register" to coefficients for frequency n,
+ * selected by reading index "CRTC index" of "CRTC port" ANDed with
+ * "mask" and shifted right by "shift".
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t shift = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 6];
+ uint32_t reg = ROM32(bios->data[offset + 7]);
+ uint8_t config;
+ uint32_t freq;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
+ offset, crtcport, crtcindex, mask, shift, count, reg);
+
+ if (!reg)
+ return true;
+
+ config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+ if (config > count) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+ offset, config, count);
+ return false;
+ }
+
+ freq = ROM32(bios->data[offset + 11 + config * 4]);
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
+ offset, reg, config, freq);
+
+ setPLL(bios, reg, freq);
+
+ return true;
+}
+
+static bool
+init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_PLL2 opcode: 0x4B ('K')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): freq
+ *
+ * Set PLL register "register" to coefficients for frequency "freq"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t freq = ROM32(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return true;
+
+	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %dkHz\n",
+		offset, reg, freq);
+
+ setPLL(bios, reg, freq);
+ return true;
+}
+
+static bool
+init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_I2C_BYTE opcode: 0x4C ('L')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): count
+ * offset + 4 (8 bit): I2C register 1
+ * offset + 5 (8 bit): mask 1
+ * offset + 6 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" registers given by "I2C register n" on the device
+ * addressed by "I2C slave address" on the I2C bus given by
+ * "DCB I2C table entry index", read the register, AND the result with
+ * "mask n" and OR it with "data n" before writing it back to the device
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t count = bios->data[offset + 3];
+ struct nouveau_i2c_chan *chan;
+ struct i2c_msg msg;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, i2c_index, i2c_address, count);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return false;
+
+ for (i = 0; i < count; i++) {
+ uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+ uint8_t mask = bios->data[offset + 5 + i * 3];
+ uint8_t data = bios->data[offset + 6 + i * 3];
+ uint8_t value;
+
+ msg.addr = i2c_address;
+ msg.flags = I2C_M_RD;
+ msg.len = 1;
+ msg.buf = &value;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return false;
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, i2c_reg, value, mask, data);
+
+ value = (value & mask) | data;
+
+ if (bios->execute) {
+ msg.addr = i2c_address;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &value;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): count
+ * offset + 4 (8 bit): I2C register 1
+ * offset + 5 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" registers given by "I2C register n" on the device
+ * addressed by "I2C slave address" on the I2C bus given by
+ * "DCB I2C table entry index", set the register to "data n"
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t count = bios->data[offset + 3];
+ struct nouveau_i2c_chan *chan;
+ struct i2c_msg msg;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, i2c_index, i2c_address, count);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return false;
+
+ for (i = 0; i < count; i++) {
+ uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
+ uint8_t data = bios->data[offset + 5 + i * 2];
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
+ offset, i2c_reg, data);
+
+ if (bios->execute) {
+ msg.addr = i2c_address;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &data;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_I2C opcode: 0x4E ('N')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): count
+ * offset + 4 (8 bit): data 1
+ * ...
+ *
+ * Send "count" bytes ("data n") to the device addressed by "I2C slave
+ * address" on the I2C bus given by "DCB I2C table entry index"
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t count = bios->data[offset + 3];
+ struct nouveau_i2c_chan *chan;
+ struct i2c_msg msg;
+ uint8_t data[256];
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, i2c_index, i2c_address, count);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return false;
+
+ for (i = 0; i < count; i++) {
+ data[i] = bios->data[offset + 4 + i];
+
+ BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
+ }
+
+ if (bios->execute) {
+ msg.addr = i2c_address;
+ msg.flags = 0;
+ msg.len = count;
+ msg.buf = data;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_TMDS opcode: 0x4F ('O') (non-canon name)
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): magic lookup value
+ * offset + 2 (8 bit): TMDS address
+ * offset + 3 (8 bit): mask
+ * offset + 4 (8 bit): data
+ *
+ * Read the data reg for TMDS address "TMDS address", AND it with mask
+ * and OR it with data, then write it back
+ * "magic lookup value" determines which TMDS base address register is
+ * used -- see get_tmds_index_reg()
+ */
+
+ uint8_t mlv = bios->data[offset + 1];
+ uint32_t tmdsaddr = bios->data[offset + 2];
+ uint8_t mask = bios->data[offset + 3];
+ uint8_t data = bios->data[offset + 4];
+ uint32_t reg, value;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, mlv, tmdsaddr, mask, data);
+
+ reg = get_tmds_index_reg(bios->dev, mlv);
+ if (!reg)
+ return false;
+
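+	/*
+	 * Latch the address with the write-disable bit set, update the data
+	 * register, then re-latch the address with writes enabled, which
+	 * presumably commits the new value
+	 */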
+ bios_wr32(bios, reg,
+ tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
+ value = (bios_rd32(bios, reg + 4) & mask) | data;
+ bios_wr32(bios, reg + 4, value);
+ bios_wr32(bios, reg, tmdsaddr);
+
+ return true;
+}
+
+static bool
+init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name)
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): magic lookup value
+ * offset + 2 (8 bit): count
+ * offset + 3 (8 bit): addr 1
+ * offset + 4 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" TMDS address and data pairs write "data n" to
+ * "addr n". "magic lookup value" determines which TMDS base address
+ * register is used -- see get_tmds_index_reg()
+ */
+
+ uint8_t mlv = bios->data[offset + 1];
+ uint8_t count = bios->data[offset + 2];
+ uint32_t reg;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
+ offset, mlv, count);
+
+ reg = get_tmds_index_reg(bios->dev, mlv);
+ if (!reg)
+ return false;
+
+ for (i = 0; i < count; i++) {
+ uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
+ uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
+
+ bios_wr32(bios, reg + 4, tmdsdata);
+ bios_wr32(bios, reg, tmdsaddr);
+ }
+
+ return true;
+}
+
+static bool
+init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): CRTC index1
+ * offset + 2 (8 bit): CRTC index2
+ * offset + 3 (8 bit): baseaddr
+ * offset + 4 (8 bit): count
+ * offset + 5 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" address and data pairs, write "baseaddr + n" to
+ * "CRTC index1" and "data n" to "CRTC index2"
+ * Once complete, restore initial value read from "CRTC index1"
+ */
+ uint8_t crtcindex1 = bios->data[offset + 1];
+ uint8_t crtcindex2 = bios->data[offset + 2];
+ uint8_t baseaddr = bios->data[offset + 3];
+ uint8_t count = bios->data[offset + 4];
+ uint8_t oldaddr, data;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
+ "BaseAddr: 0x%02X, Count: 0x%02X\n",
+ offset, crtcindex1, crtcindex2, baseaddr, count);
+
+ oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
+
+ for (i = 0; i < count; i++) {
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
+ baseaddr + i);
+ data = bios->data[offset + 5 + i];
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
+ }
+
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
+
+ return true;
+}
+
+static bool
+init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_CR opcode: 0x52 ('R')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): CRTC index
+ * offset + 2 (8 bit): mask
+ * offset + 3 (8 bit): data
+ *
+	 * Assign the value at "CRTC index", ANDed with "mask" and ORed with
+	 * "data", back to "CRTC index"
+ */
+
+ uint8_t crtcindex = bios->data[offset + 1];
+ uint8_t mask = bios->data[offset + 2];
+ uint8_t data = bios->data[offset + 3];
+ uint8_t value;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
+ offset, crtcindex, mask, data);
+
+ value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
+ value |= data;
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
+
+ return true;
+}
+
+static bool
+init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_CR opcode: 0x53 ('S')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): CRTC index
+ * offset + 2 (8 bit): value
+ *
+ * Assign "value" to CRTC register with index "CRTC index".
+ */
+
+	uint8_t crtcindex = bios->data[offset + 1];
+ uint8_t data = bios->data[offset + 2];
+
+ if (!iexec->execute)
+ return true;
+
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
+
+ return true;
+}
+
+static bool
+init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): count
+ * offset + 2 (8 bit): CRTC index 1
+ * offset + 3 (8 bit): value 1
+ * ...
+ *
+	 * For each of "count" pairs, assign "value n" to the CRTC register
+	 * with index "CRTC index n".
+ */
+
+ uint8_t count = bios->data[offset + 1];
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
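+	/*
+	 * The index/value pairs start at offset + 2; pass the address one
+	 * byte early so init_zm_cr finds the index at its offset + 1 and the
+	 * value at its offset + 2
+	 */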
+ for (i = 0; i < count; i++)
+ init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
+
+ return true;
+}
+
+static bool
+init_condition_time(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONDITION_TIME opcode: 0x56 ('V')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ * offset + 2 (8 bit): retries / 50
+ *
+ * Check condition "condition number" in the condition table.
+ * Bios code then sleeps for 2ms if the condition is not met, and
+ * repeats up to "retries" times, but on one C51 this has proved
+ * insufficient. In mmiotraces the driver sleeps for 20ms, so we do
+ * this, and bail after "retries" times, or 2s, whichever is less.
+ * If still not met after retries, clear execution flag for this table.
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+ uint16_t retries = bios->data[offset + 2] * 50;
+ unsigned cnt;
+
+ if (!iexec->execute)
+ return true;
+
+ if (retries > 100)
+ retries = 100;
+
+ BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
+ offset, cond, retries);
+
+ if (!bios->execute) /* avoid 2s delays when "faking" execution */
+ retries = 1;
+
+ for (cnt = 0; cnt < retries; cnt++) {
+ if (bios_condition_met(bios, offset, cond)) {
+ BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
+ offset);
+ break;
+ } else {
+ BIOSLOG(bios, "0x%04X: "
+ "Condition not met, sleeping for 20ms\n",
+ offset);
+ msleep(20);
+ }
+ }
+
+ if (!bios_condition_met(bios, offset, cond)) {
+ NV_WARN(bios->dev,
+ "0x%04X: Condition still not met after %dms, "
+ "skipping following opcodes\n", offset, 20 * retries);
+ iexec->execute = false;
+ }
+
+ return true;
+}
+
+static bool
+init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): base register
+ * offset + 5 (8 bit): count
+ * offset + 6 (32 bit): value 1
+ * ...
+ *
+ * Starting at offset + 6 there are "count" 32 bit values.
+ * For "count" iterations set "base register" + 4 * current_iteration
+ * to "value current_iteration"
+ */
+
+ uint32_t basereg = ROM32(bios->data[offset + 1]);
+ uint32_t count = bios->data[offset + 5];
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
+ offset, basereg, count);
+
+ for (i = 0; i < count; i++) {
+ uint32_t reg = basereg + i * 4;
+ uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
+
+ bios_wr32(bios, reg, data);
+ }
+
+ return true;
+}
+
+static bool
+init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_SUB_DIRECT opcode: 0x5B ('[')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): subroutine offset (in bios)
+ *
+ * Calls a subroutine that will execute commands until INIT_DONE
+ * is found.
+ */
+
+ uint16_t sub_offset = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
+ offset, sub_offset);
+
+ parse_init_table(bios, sub_offset, iexec);
+
+ BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
+
+ return true;
+}
+
+static bool
+init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COPY_NV_REG opcode: 0x5F ('_')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): src reg
+ * offset + 5 (8 bit): shift
+ * offset + 6 (32 bit): src mask
+ * offset + 10 (32 bit): xor
+ * offset + 14 (32 bit): dst reg
+ * offset + 18 (32 bit): dst mask
+ *
+ * Shift REGVAL("src reg") right by (signed) "shift", AND result with
+ * "src mask", then XOR with "xor". Write this OR'd with
+ * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
+ */
+
+	uint32_t srcreg = ROM32(bios->data[offset + 1]);
+	uint8_t shift = bios->data[offset + 5];
+	uint32_t srcmask = ROM32(bios->data[offset + 6]);
+	uint32_t xor = ROM32(bios->data[offset + 10]);
+	uint32_t dstreg = ROM32(bios->data[offset + 14]);
+	uint32_t dstmask = ROM32(bios->data[offset + 18]);
+ uint32_t srcvalue, dstvalue;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
+ "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
+ offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
+
+ srcvalue = bios_rd32(bios, srcreg);
+
+ if (shift < 0x80)
+ srcvalue >>= shift;
+ else
+ srcvalue <<= (0x100 - shift);
+
+ srcvalue = (srcvalue & srcmask) ^ xor;
+
+ dstvalue = bios_rd32(bios, dstreg) & dstmask;
+
+ bios_wr32(bios, dstreg, dstvalue | srcvalue);
+
+ return true;
+}
+
+static bool
+init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): data
+ *
+ * Write "data" to index "CRTC index" of "CRTC port"
+ */
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t data = bios->data[offset + 4];
+
+ if (!iexec->execute)
+ return true;
+
+ bios_idxprt_wr(bios, crtcport, crtcindex, data);
+
+ return true;
+}
+
+static bool
+init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COMPUTE_MEM opcode: 0x63 ('c')
+ *
+ * offset (8 bit): opcode
+ *
+ * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
+ * that the hardware can correctly calculate how much VRAM it has
+ * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
+ *
+ * The implementation of this opcode in general consists of two parts:
+ * 1) determination of the memory bus width
+ * 2) determination of how many of the card's RAM pads have ICs attached
+ *
+ * 1) is done by a cunning combination of writes to offsets 0x1c and
+ * 0x3c in the framebuffer, and seeing whether the written values are
+ * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
+ *
+ * 2) is done by a cunning combination of writes to an offset slightly
+ * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
+ * if the test pattern can be read back. This then affects bits 12-15 of
+ * NV_PFB_CFG0
+ *
+ * In this context a "cunning combination" may include multiple reads
+ * and writes to varying locations, often alternating the test pattern
+ * and 0, doubtless to make sure buffers are filled, residual charges
+ * on tracks are removed etc.
+ *
+ * Unfortunately, the "cunning combination"s mentioned above, and the
+ * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
+ * trace I have.
+ *
+ * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
+ * we started was correct, and use that instead
+ */
+
+ /* no iexec->execute check by design */
+
+ /*
+ * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
+ * and kmmio traces of the binary driver POSTing the card show nothing
+ * being done for this opcode. why is it still listed in the table?!
+ */
+
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ return true;
+
+ /*
+ * On every card I've seen, this step gets done for us earlier in
+ * the init scripts
+ uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
+ bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
+ */
+
+ /*
+ * This also has probably been done in the scripts, but an mmio trace of
+ * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
+ */
+ bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
+
+ /* write back the saved configuration value */
+ bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
+
+ return true;
+}
+
+static bool
+init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_RESET opcode: 0x65 ('e')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): value1
+ * offset + 9 (32 bit): value2
+ *
+ * Assign "value1" to "register", then assign "value2" to "register"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t value1 = ROM32(bios->data[offset + 5]);
+ uint32_t value2 = ROM32(bios->data[offset + 9]);
+ uint32_t pci_nv_19, pci_nv_20;
+
+ /* no iexec->execute check by design */
+
+ pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
+ bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
+ bios_wr32(bios, reg, value1);
+
+ udelay(10);
+
+ bios_wr32(bios, reg, value2);
+ bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
+
+ pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
+ pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
+ bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
+
+ return true;
+}
+
+static bool
+init_configure_mem(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
+ *
+ * offset (8 bit): opcode
+ *
+ * Equivalent to INIT_DONE on bios version 3 or greater.
+ * For early bios versions, sets up the memory registers, using values
+ * taken from the memory init table
+ */
+
+ /* no iexec->execute check by design */
+
+ uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
+ uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
+ uint32_t reg, data;
+
+ if (bios->major_version > 2)
+ return false;
+
+ bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
+ bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
+
+ if (bios->data[meminitoffs] & 1)
+ seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
+
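+	/* walk the register sequence until the 0xffffffff terminator */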
+ for (reg = ROM32(bios->data[seqtbloffs]);
+ reg != 0xffffffff;
+ reg = ROM32(bios->data[seqtbloffs += 4])) {
+
+ switch (reg) {
+ case NV_PFB_PRE:
+ data = NV_PFB_PRE_CMD_PRECHARGE;
+ break;
+ case NV_PFB_PAD:
+ data = NV_PFB_PAD_CKE_NORMAL;
+ break;
+ case NV_PFB_REF:
+ data = NV_PFB_REF_CMD_REFRESH;
+ break;
+ default:
+ data = ROM32(bios->data[meminitdata]);
+ meminitdata += 4;
+ if (data == 0xffffffff)
+ continue;
+ }
+
+ bios_wr32(bios, reg, data);
+ }
+
+ return true;
+}
+
+static bool
+init_configure_clk(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
+ *
+ * offset (8 bit): opcode
+ *
+ * Equivalent to INIT_DONE on bios version 3 or greater.
+ * For early bios versions, sets up the NVClk and MClk PLLs, using
+ * values taken from the memory init table
+ */
+
+ /* no iexec->execute check by design */
+
+ uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
+ int clock;
+
+ if (bios->major_version > 2)
+ return false;
+
+ clock = ROM16(bios->data[meminitoffs + 4]) * 10;
+ setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
+
+ clock = ROM16(bios->data[meminitoffs + 2]) * 10;
+ if (bios->data[meminitoffs] & 1) /* DDR */
+ clock *= 2;
+ setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
+
+ return true;
+}
+
+static bool
+init_configure_preinit(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
+ *
+ * offset (8 bit): opcode
+ *
+ * Equivalent to INIT_DONE on bios version 3 or greater.
+ * For early bios versions, does early init, loading ram and crystal
+ * configuration from straps into CR3C
+ */
+
+ /* no iexec->execute check by design */
+
+ uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
+ uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
+
+ if (bios->major_version > 2)
+ return false;
+
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
+ NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
+
+ return true;
+}
+
+static bool
+init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_IO opcode: 0x69 ('i')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): mask
+ * offset + 4 (8 bit): data
+ *
+ * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
+ */
+
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t mask = bios->data[offset + 3];
+ uint8_t data = bios->data[offset + 4];
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
+ offset, crtcport, mask, data);
+
+ /*
+ * I have no idea what this does, but NVIDIA do this magic sequence
+	 * in the places where this INIT_IO happens...
+ */
+ if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
+ int i;
+
+ bios_wr32(bios, 0x614100, (bios_rd32(
+ bios, 0x614100) & 0x0fffffff) | 0x00800000);
+
+ bios_wr32(bios, 0x00e18c, bios_rd32(
+ bios, 0x00e18c) | 0x00020000);
+
+ bios_wr32(bios, 0x614900, (bios_rd32(
+ bios, 0x614900) & 0x0fffffff) | 0x00800000);
+
+ bios_wr32(bios, 0x000200, bios_rd32(
+ bios, 0x000200) & ~0x40000000);
+
+ mdelay(10);
+
+ bios_wr32(bios, 0x00e18c, bios_rd32(
+ bios, 0x00e18c) & ~0x00020000);
+
+ bios_wr32(bios, 0x000200, bios_rd32(
+ bios, 0x000200) | 0x40000000);
+
+ bios_wr32(bios, 0x614100, 0x00800018);
+ bios_wr32(bios, 0x614900, 0x00800018);
+
+ mdelay(10);
+
+ bios_wr32(bios, 0x614100, 0x10000018);
+ bios_wr32(bios, 0x614900, 0x10000018);
+
+ for (i = 0; i < 3; i++)
+ bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
+ bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
+
+ for (i = 0; i < 2; i++)
+ bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
+ bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
+
+ for (i = 0; i < 3; i++)
+ bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
+ bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
+
+ for (i = 0; i < 2; i++)
+ bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
+ bios, 0x614200 + (i*0x800)) & 0xfffffff0);
+
+ for (i = 0; i < 2; i++)
+ bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
+ bios, 0x614108 + (i*0x800)) & 0x0fffffff);
+ return true;
+ }
+
+ bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
+ data);
+ return true;
+}
+
+static bool
+init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_SUB opcode: 0x6B ('k')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): script number
+ *
+ * Execute script number "script number", as a subroutine
+ */
+
+ uint8_t sub = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
+
+ parse_init_table(bios,
+ ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
+ iexec);
+
+ BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
+
+ return true;
+}
+
+static bool
+init_ram_condition(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_RAM_CONDITION opcode: 0x6D ('m')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): mask
+ * offset + 2 (8 bit): cmpval
+ *
+ * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t mask = bios->data[offset + 1];
+ uint8_t cmpval = bios->data[offset + 2];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return true;
+
+ data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
+ offset, data, cmpval);
+
+ if (data == cmpval)
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return true;
+}
+
+static bool
+init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_NV_REG opcode: 0x6E ('n')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): mask
+ * offset + 9 (32 bit): data
+ *
+ * Assign ((REGVAL("register") & "mask") | "data") to "register"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t mask = ROM32(bios->data[offset + 5]);
+ uint32_t data = ROM32(bios->data[offset + 9]);
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
+ offset, reg, mask, data);
+
+ bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
+
+ return true;
+}
+
+static bool
+init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_MACRO opcode: 0x6F ('o')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): macro number
+ *
+ * Look up macro index "macro number" in the macro index table.
+ * The macro index table entry has 1 byte for the index in the macro
+ * table, and 1 byte for the number of times to repeat the macro.
+ * The macro table entry has 4 bytes for the register address and
+ * 4 bytes for the value to write to that register
+ */
+
+ uint8_t macro_index_tbl_idx = bios->data[offset + 1];
+ uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
+ uint8_t macro_tbl_idx = bios->data[tmp];
+ uint8_t count = bios->data[tmp + 1];
+ uint32_t reg, data;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, macro_index_tbl_idx, macro_tbl_idx, count);
+
+ for (i = 0; i < count; i++) {
+ uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
+
+ reg = ROM32(bios->data[macroentryptr]);
+ data = ROM32(bios->data[macroentryptr + 4]);
+
+ bios_wr32(bios, reg, data);
+ }
+
+ return true;
+}
+
+static bool
+init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_DONE opcode: 0x71 ('q')
+ *
+ * offset (8 bit): opcode
+ *
+ * End the current script
+ */
+
+ /* mild retval abuse to stop parsing this table */
+ return false;
+}
+
+static bool
+init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_RESUME opcode: 0x72 ('r')
+ *
+ * offset (8 bit): opcode
+ *
+ * End the current execute / no-execute condition
+ */
+
+ if (iexec->execute)
+ return true;
+
+ iexec->execute = true;
+ BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
+
+ return true;
+}
+
+static bool
+init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_TIME opcode: 0x74 ('t')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): time
+ *
+ * Sleep for "time" microseconds.
+ */
+
+ unsigned time = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
+ offset, time);
+
+ if (time < 1000)
+ udelay(time);
+ else
+ msleep((time + 900) / 1000);
+
+ return true;
+}
+
+static bool
+init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_CONDITION opcode: 0x75 ('u')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ *
+ * Check condition "condition number" in the condition table.
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
+
+ if (bios_condition_met(bios, offset, cond))
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return true;
+}
+
+static bool
+init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_CONDITION opcode: 0x76
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ *
+ * Check condition "condition number" in the io condition table.
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
+
+ if (io_condition_met(bios, offset, cond))
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return true;
+}
+
+static bool
+init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_INDEX_IO opcode: 0x78 ('x')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): data
+ *
+ * Read value at index "CRTC index" on "CRTC port", AND with "mask",
+ * OR with "data", write-back
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t data = bios->data[offset + 5];
+ uint8_t value;
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Data: 0x%02X\n",
+ offset, crtcport, crtcindex, mask, data);
+
+ value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
+ bios_idxprt_wr(bios, crtcport, crtcindex, value);
+
+ return true;
+}
+
+static bool
+init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_PLL opcode: 0x79 ('y')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (16 bit): freq
+ *
+ * Set PLL register "register" to coefficients for frequency (10kHz)
+ * "freq"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint16_t freq = ROM16(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return true;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
+
+ setPLL(bios, reg, freq * 10);
+
+ return true;
+}
+
+static bool
+init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_REG opcode: 0x7A ('z')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): value
+ *
+ * Assign "value" to "register"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t value = ROM32(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return true;
+
+ if (reg == 0x000200)
+ value |= 1;
+
+ bios_wr32(bios, reg, value);
+
+ return true;
+}
+
+static bool
+init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): PLL type
+ * offset + 2 (32 bit): frequency 0
+ *
+ * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
+ * ram_restrict_table_ptr. The value read from there is used to select
+ * a frequency from the table starting at 'frequency 0' to be
+ * programmed into the PLL corresponding to 'type'.
+ *
+ * The PLL limits table on cards using this opcode has a mapping of
+ * 'type' to the relevant registers.
+ */
+
+ struct drm_device *dev = bios->dev;
+ uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
+ uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
+ uint8_t type = bios->data[offset + 1];
+ uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
+ uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
+ NV_ERROR(dev, "PLL limits table not version 3.x\n");
+ return true; /* deliberate, allow default clocks to remain */
+ }
+
+ entry = pll_limits + pll_limits[1];
+ for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
+ if (entry[0] == type) {
+ uint32_t reg = ROM32(entry[3]);
+
+ BIOSLOG(bios, "0x%04X: "
+				"Type %02x Reg 0x%08x Freq %dkHz\n",
+ offset, type, reg, freq);
+
+ setPLL(bios, reg, freq);
+ return true;
+ }
+ }
+
+	NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table\n", type);
+ return true;
+}
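+
+/*
+ * Index arithmetic example for the above, with invented values: a RAMCFG
+ * strap of 5 selects byte 5 of the ram restrict table; if that byte is 2,
+ * the frequency is read from offset + 2 + 2 * 4, i.e. the third 32-bit
+ * entry of the frequency list following the PLL type byte.
+ */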
+
+static bool
+init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_8C opcode: 0x8C ('')
+ *
+ * NOP so far....
+ *
+ */
+
+ return true;
+}
+
+static bool
+init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_8D opcode: 0x8D ('')
+ *
+ * NOP so far....
+ *
+ */
+
+ return true;
+}
+
+static bool
+init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_GPIO opcode: 0x8E ('')
+ *
+ * offset (8 bit): opcode
+ *
+ * Loop over all entries in the DCB GPIO table, and initialise
+ * each GPIO according to various values listed in each entry
+ */
+
+ const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+ const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+ const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
+ const uint8_t *gpio_entry;
+ int i;
+
+ if (bios->bdcb.version != 0x40) {
+ NV_ERROR(bios->dev, "DCB table not version 4.0\n");
+ return false;
+ }
+
+ if (!bios->bdcb.gpio_table_ptr) {
+ NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
+ return false;
+ }
+
+ gpio_entry = gpio_table + gpio_table[1];
+ for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
+ uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
+ int line = (entry & 0x0000001f);
+
+ BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
+
+ if ((entry & 0x0000ff00) == 0x0000ff00)
+ continue;
+
+ r = nv50_gpio_reg[line >> 3];
+ s = (line & 0x07) << 2;
+ v = bios_rd32(bios, r) & ~(0x00000003 << s);
+ if (entry & 0x01000000)
+ v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
+ else
+ v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
+ bios_wr32(bios, r, v);
+
+ r = nv50_gpio_ctl[line >> 4];
+ s = (line & 0x0f);
+ v = bios_rd32(bios, r) & ~(0x00010001 << s);
+ switch ((entry & 0x06000000) >> 25) {
+ case 1:
+ v |= (0x00000001 << s);
+ break;
+ case 2:
+ v |= (0x00010000 << s);
+ break;
+ default:
+ break;
+ }
+ bios_wr32(bios, r, v);
+ }
+
+ return true;
+}
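+
+/*
+ * Register selection example for the GPIO loop above, for a hypothetical
+ * entry on line 10: the data register is nv50_gpio_reg[10 >> 3] = 0xe108
+ * with a 2-bit field at shift (10 & 7) << 2 = 8, and the control register
+ * is nv50_gpio_ctl[10 >> 4] = 0xe100 with shift 10.
+ */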
+
+/* hack to avoid moving the itbl_entry array before this function */
+int init_ram_restrict_zm_reg_group_blocklen;
+
+static bool
+init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): reg
+ * offset + 5 (8 bit): regincrement
+ * offset + 6 (8 bit): count
+ * offset + 7 (32 bit): value 1,1
+ * ...
+ *
+ * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
+ * ram_restrict_table_ptr. The value read from here is 'n', and
+ * "value 1,n" gets written to "reg". This repeats "count" times and on
+ * each iteration 'm', "reg" increases by "regincrement" and
+ * "value m,n" is used. The extent of n is limited by a number read
+ * from the 'M' BIT table, herein called "blocklen"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint8_t regincrement = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 6];
+ uint32_t strap_ramcfg, data;
+ uint16_t blocklen;
+ uint8_t index;
+ int i;
+
+ /* previously set by 'M' BIT table */
+ blocklen = init_ram_restrict_zm_reg_group_blocklen;
+
+ if (!iexec->execute)
+ return true;
+
+ if (!blocklen) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Zero block length - has the M table "
+ "been parsed?\n", offset);
+ return false;
+ }
+
+ strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
+ index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
+ "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
+ offset, reg, regincrement, count, strap_ramcfg, index);
+
+ for (i = 0; i < count; i++) {
+ data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
+
+ bios_wr32(bios, reg, data);
+
+ reg += regincrement;
+ }
+
+ return true;
+}
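+
+/*
+ * Data layout example for the loop above, using invented numbers: with
+ * blocklen 32 and index 1, iteration i reads its dword from
+ * offset + 7 + 4 + 32 * i -- each register has a block of one 32-bit
+ * value per possible RAMCFG index, and "index" selects the column.
+ */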
+
+static bool
+init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COPY_ZM_REG opcode: 0x90 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): src reg
+ * offset + 5 (32 bit): dst reg
+ *
+ * Put contents of "src reg" into "dst reg"
+ */
+
+ uint32_t srcreg = ROM32(bios->data[offset + 1]);
+ uint32_t dstreg = ROM32(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return true;
+
+ bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
+
+ return true;
+}
+
+static bool
+init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): dst reg
+ * offset + 5 (8 bit): count
+ * offset + 6 (32 bit): data 1
+ * ...
+ *
+ * For each of "count" values write "data n" to "dst reg"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint8_t count = bios->data[offset + 5];
+ int i;
+
+ if (!iexec->execute)
+ return true;
+
+ for (i = 0; i < count; i++) {
+ uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
+ bios_wr32(bios, reg, data);
+ }
+
+ return true;
+}
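+
+/*
+ * Note that all "count" dwords above go to the one latched address:
+ * a count of 3, for example, writes the values at offset + 6,
+ * offset + 10 and offset + 14 to "dst reg" in turn.
+ */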
+
+static bool
+init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_RESERVED opcode: 0x92 ('')
+ *
+ * offset (8 bit): opcode
+ *
+ * Seemingly does nothing
+ */
+
+ return true;
+}
+
+static bool
+init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_96 opcode: 0x96 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): sreg
+ * offset + 5 (8 bit): sshift
+ * offset + 6 (8 bit): smask
+ * offset + 7 (8 bit): index
+ * offset + 8 (32 bit): reg
+ * offset + 12 (32 bit): mask
+ * offset + 16 (8 bit): shift
+ *
+ */
+
+ uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
+ uint32_t reg = ROM32(bios->data[offset + 8]);
+ uint32_t mask = ROM32(bios->data[offset + 12]);
+ uint32_t val;
+
+ val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
+ if (bios->data[offset + 5] < 0x80)
+ val >>= bios->data[offset + 5];
+ else
+ val <<= (0x100 - bios->data[offset + 5]);
+ val &= bios->data[offset + 6];
+
+ val = bios->data[ROM16(bios->data[xlatptr]) + val];
+ val <<= bios->data[offset + 16];
+
+ if (!iexec->execute)
+ return true;
+
+ bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
+ return true;
+}
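+
+/*
+ * A note on the shift encoding used above: sshift values below 0x80 are
+ * plain right shifts, while values of 0x80 and up encode a left shift as
+ * a negative count -- e.g. an sshift byte of 0xf8 means shift left by
+ * 0x100 - 0xf8 = 8 bits.
+ */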
+
+static bool
+init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_97 opcode: 0x97 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): mask
+ * offset + 9 (32 bit): value
+ *
+ * Adds "value" to "register" preserving the fields specified
+ * by "mask"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t mask = ROM32(bios->data[offset + 5]);
+ uint32_t add = ROM32(bios->data[offset + 9]);
+ uint32_t val;
+
+ val = bios_rd32(bios, reg);
+ val = (val & mask) | ((val + add) & ~mask);
+
+ if (!iexec->execute)
+ return true;
+
+ bios_wr32(bios, reg, val);
+ return true;
+}
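+
+/*
+ * A masked-add example for INIT_97, with made-up values: a register
+ * reading 0x1111000f, mask 0xffff0000 and value 1 give
+ * (0x11110000) | ((0x1111000f + 1) & 0x0000ffff) = 0x11110010 -- the
+ * masked field is carried over unchanged from the original read.
+ */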
+
+static bool
+init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_AUXCH opcode: 0x98 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): address
+ * offset + 5 (8 bit): count
+ * offset + 6 (8 bit): mask 0
+ * offset + 7 (8 bit): data 0
+ * ...
+ *
+ */
+
+ struct drm_device *dev = bios->dev;
+ struct nouveau_i2c_chan *auxch;
+ uint32_t addr = ROM32(bios->data[offset + 1]);
+ uint8_t len = bios->data[offset + 5];
+ int ret, i;
+
+ if (!bios->display.output) {
+ NV_ERROR(dev, "INIT_AUXCH: no active output\n");
+ return false;
+ }
+
+ auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
+ if (!auxch) {
+ NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
+ bios->display.output->i2c_index);
+ return false;
+ }
+
+ if (!iexec->execute)
+ return true;
+
+ offset += 6;
+ for (i = 0; i < len; i++, offset += 2) {
+ uint8_t data;
+
+ ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
+ if (ret) {
+ NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
+ return false;
+ }
+
+ data &= bios->data[offset + 0];
+ data |= bios->data[offset + 1];
+
+ ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
+ if (ret) {
+ NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
+ return false;
+ }
+ }
+
+ return true;
+}
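+
+/*
+ * The magic numbers passed to nouveau_dp_auxch() above appear to be the
+ * DisplayPort native AUX request codes: 9 for a native read and 8 for a
+ * native write, matching the per-byte read/mask/or/write this opcode
+ * performs at the aux address.
+ */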
+
+static bool
+init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_AUXCH opcode: 0x99 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): address
+ * offset + 5 (8 bit): count
+ * offset + 6 (8 bit): data 0
+ * ...
+ *
+ */
+
+ struct drm_device *dev = bios->dev;
+ struct nouveau_i2c_chan *auxch;
+ uint32_t addr = ROM32(bios->data[offset + 1]);
+ uint8_t len = bios->data[offset + 5];
+ int ret, i;
+
+ if (!bios->display.output) {
+ NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
+ return false;
+ }
+
+ auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
+ if (!auxch) {
+ NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
+ bios->display.output->i2c_index);
+ return false;
+ }
+
+ if (!iexec->execute)
+ return true;
+
+ offset += 6;
+ for (i = 0; i < len; i++, offset++) {
+ ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
+ if (ret) {
+ NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static struct init_tbl_entry itbl_entry[] = {
+ /* command name , id , length , offset , mult , command handler */
+ /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
+ { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog },
+ { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat },
+ { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll },
+ { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat },
+ { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy },
+ { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not },
+ { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition },
+ { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched },
+ { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 },
+ { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 },
+ { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte },
+ { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte },
+ { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c },
+ { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds },
+ { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group },
+ { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch },
+ { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr },
+ { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr },
+ { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group },
+ { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time },
+ { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence },
+ /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
+ { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct },
+ { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg },
+ { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io },
+ { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem },
+ { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset },
+ { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem },
+ { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk },
+ { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit },
+ { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io },
+ { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub },
+ { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition },
+ { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg },
+ { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro },
+ { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done },
+ { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume },
+ /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
+ { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time },
+ { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition },
+ { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition },
+ { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io },
+ { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll },
+ { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg },
+ /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */
+ { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll },
+ { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c },
+ { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d },
+ { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio },
+ /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */
+ { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group },
+ { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg },
+ { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched },
+ { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved },
+ { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 },
+ { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 },
+ { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch },
+ { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch },
+ { NULL , 0 , 0 , 0 , 0 , NULL }
+};
+
+static unsigned int
+get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
+{
+	/* Calculates the total length of a given init table entry. */
+	return itbl_entry[i].length +
+		bios->data[offset + itbl_entry[i].length_offset] *
+		itbl_entry[i].length_multiplier;
+}
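+
+/*
+ * For example, INIT_ZM_REG_GROUP_ADDRESS_LATCHED (0x91) is declared above
+ * with length 6, length_offset 5 and length_multiplier 4, so an op whose
+ * count byte is 3 occupies 6 + 3 * 4 = 18 bytes in the table.
+ */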
+
+#define MAX_TABLE_OPS 1000
+
+static int
+parse_init_table(struct nvbios *bios, unsigned int offset,
+ struct init_exec *iexec)
+{
+ /*
+ * Parses all commands in an init table.
+ *
+ * We start out executing all commands found in the init table. Some
+ * opcodes may change the status of iexec->execute to SKIP, which will
+ * cause the following opcodes to perform no operation until the value
+ * is changed back to EXECUTE.
+ */
+
+ int count = 0, i;
+ uint8_t id;
+
+ /*
+ * Loop until INIT_DONE causes us to break out of the loop
+ * (or until offset > bios length just in case... )
+ * (and no more than MAX_TABLE_OPS iterations, just in case... )
+ */
+ while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
+ id = bios->data[offset];
+
+ /* Find matching id in itbl_entry */
+ for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
+ ;
+
+ if (itbl_entry[i].name) {
+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
+ offset, itbl_entry[i].id, itbl_entry[i].name);
+
+			/* execute the command's handler, if any */
+ if (itbl_entry[i].handler)
+ if (!(*itbl_entry[i].handler)(bios, offset, iexec))
+ break;
+ } else {
+ NV_ERROR(bios->dev,
+ "0x%04X: Init table command not found: "
+ "0x%02X\n", offset, id);
+ return -ENOENT;
+ }
+
+ /*
+		 * Advance the offset past the current command and all its
+		 * data, leaving it pointing at the next opcode.
+ */
+ offset += get_init_table_entry_length(bios, offset, i);
+ }
+
+ if (offset >= bios->length)
+ NV_WARN(bios->dev,
+ "Offset 0x%04X greater than known bios image length. "
+ "Corrupt image?\n", offset);
+ if (count >= MAX_TABLE_OPS)
+ NV_WARN(bios->dev,
+ "More than %d opcodes to a table is unlikely, "
+ "is the bios image corrupt?\n", MAX_TABLE_OPS);
+
+ return 0;
+}
+
+static void
+parse_init_tables(struct nvbios *bios)
+{
+ /* Loops and calls parse_init_table() for each present table. */
+
+ int i = 0;
+ uint16_t table;
+ struct init_exec iexec = {true, false};
+
+ if (bios->old_style_init) {
+ if (bios->init_script_tbls_ptr)
+ parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
+ if (bios->extra_init_script_tbl_ptr)
+ parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
+
+ return;
+ }
+
+ while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
+ NV_INFO(bios->dev,
+ "Parsing VBIOS init table %d at offset 0x%04X\n",
+ i / 2, table);
+ BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
+
+ parse_init_table(bios, table, &iexec);
+ i += 2;
+ }
+}
+
+static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
+{
+ int compare_record_len, i = 0;
+ uint16_t compareclk, scriptptr = 0;
+
+ if (bios->major_version < 5) /* pre BIT */
+ compare_record_len = 3;
+ else
+ compare_record_len = 4;
+
+ do {
+ compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
+ if (pxclk >= compareclk * 10) {
+ if (bios->major_version < 5) {
+ uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
+ scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
+ } else
+ scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
+ break;
+ }
+ i++;
+ } while (compareclk);
+
+ return scriptptr;
+}
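+
+/*
+ * A comparison example with invented values: for pxclk 140000 (kHz) and
+ * a BIT-style record holding compareclk 15500, the test
+ * 140000 >= 155000 fails and the walk moves on; a record with
+ * compareclk 0 always matches, so a zero entry terminates the table and
+ * doubles as the default script.
+ */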
+
+static void
+run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
+ struct dcb_entry *dcbent, int head, bool dl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct init_exec iexec = {true, false};
+
+ NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
+ scriptptr);
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
+ head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
+ /* note: if dcb entries have been merged, index may be misleading */
+ NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
+ parse_init_table(bios, scriptptr, &iexec);
+
+ nv04_dfp_bind_head(dev, dcbent, head, dl);
+}
+
+static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
+ uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+
+ if (!bios->fp.xlated_entry || !sub || !scriptofs)
+ return -EINVAL;
+
+ run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
+
+ if (script == LVDS_PANEL_OFF) {
+ /* off-on delay in ms */
+ msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+ }
+#ifdef __powerpc__
+ /* Powerbook specific quirks */
+ if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+ if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
+ if (script == LVDS_PANEL_ON) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+ }
+ if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+ /*
+	 * The BIT LVDS table's header has the information to set up the
+ * necessary registers. Following the standard 4 byte header are:
+ * A bitmask byte and a dual-link transition pxclk value for use in
+ * selecting the init script when not using straps; 4 script pointers
+ * for panel power, selected by output and on/off; and 8 table pointers
+ * for panel init, the needed one determined by output, and bits in the
+ * conf byte. These tables are similar to the TMDS tables, consisting
+ * of a list of pxclks and script pointers.
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
+ uint16_t scriptptr = 0, clktable;
+ uint8_t clktableptr = 0;
+
+ /*
+ * For now we assume version 3.0 table - g80 support will need some
+ * changes
+ */
+
+ switch (script) {
+ case LVDS_INIT:
+ return -ENOSYS;
+ case LVDS_BACKLIGHT_ON:
+ case LVDS_PANEL_ON:
+ scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
+ break;
+ case LVDS_BACKLIGHT_OFF:
+ case LVDS_PANEL_OFF:
+ scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
+ break;
+ case LVDS_RESET:
+ if (dcbent->lvdsconf.use_straps_for_mode) {
+ if (bios->fp.dual_link)
+ clktableptr += 2;
+ if (bios->fp.BITbit1)
+ clktableptr++;
+ } else {
+ /* using EDID */
+ uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+ int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
+
+ if (bios->fp.dual_link) {
+ clktableptr += 2;
+ fallbackcmpval *= 2;
+ }
+ if (fallbackcmpval & fallback)
+ clktableptr++;
+ }
+
+ /* adding outputset * 8 may not be correct */
+ clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
+ if (!clktable) {
+ NV_ERROR(dev, "Pixel clock comparison table not found\n");
+ return -ENOENT;
+ }
+ scriptptr = clkcmptable(bios, clktable, pxclk);
+ }
+
+ if (!scriptptr) {
+ NV_ERROR(dev, "LVDS output init script not found\n");
+ return -ENOENT;
+ }
+ run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
+
+ return 0;
+}
+
+int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+ /*
+ * LVDS operations are multiplexed in an effort to present a single API
+ * which works with two vastly differing underlying structures.
+ * This acts as the demux
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+ uint32_t sel_clk_binding, sel_clk;
+ int ret;
+
+ if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
+ (lvds_ver >= 0x30 && script == LVDS_INIT))
+ return 0;
+
+ if (!bios->fp.lvds_init_run) {
+ bios->fp.lvds_init_run = true;
+ call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
+ }
+
+ if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
+ call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
+ if (script == LVDS_RESET && bios->fp.power_off_for_reset)
+ call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
+
+ NV_TRACE(dev, "Calling LVDS script %d:\n", script);
+
+ /* don't let script change pll->head binding */
+ sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+
+ if (lvds_ver < 0x30)
+ ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
+ else
+ ret = run_lvds_table(dev, dcbent, head, script, pxclk);
+
+ bios->fp.last_script_invoc = (script << 1 | head);
+
+ sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+ /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+
+ return ret;
+}
+
+struct lvdstableheader {
+ uint8_t lvds_ver, headerlen, recordlen;
+};
+
+static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
+{
+ /*
+ * BMP version (0xa) LVDS table has a simple header of version and
+ * record length. The BIT LVDS table has the typical BIT table header:
+ * version byte, header length byte, record length byte, and a byte for
+ * the maximum number of records that can be held in the table.
+ */
+
+ uint8_t lvds_ver, headerlen, recordlen;
+
+ memset(lth, 0, sizeof(struct lvdstableheader));
+
+ if (bios->fp.lvdsmanufacturerpointer == 0x0) {
+ NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
+ return -EINVAL;
+ }
+
+ lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+
+ switch (lvds_ver) {
+ case 0x0a: /* pre NV40 */
+ headerlen = 2;
+ recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+ break;
+ case 0x30: /* NV4x */
+ headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+ if (headerlen < 0x1f) {
+ NV_ERROR(dev, "LVDS table header not understood\n");
+ return -EINVAL;
+ }
+ recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+ break;
+ case 0x40: /* G80/G90 */
+ headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+ if (headerlen < 0x7) {
+ NV_ERROR(dev, "LVDS table header not understood\n");
+ return -EINVAL;
+ }
+ recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+ break;
+ default:
+ NV_ERROR(dev,
+ "LVDS table revision %d.%d not currently supported\n",
+ lvds_ver >> 4, lvds_ver & 0xf);
+ return -ENOSYS;
+ }
+
+ lth->lvds_ver = lvds_ver;
+ lth->headerlen = headerlen;
+ lth->recordlen = recordlen;
+
+ return 0;
+}
+
+static int
+get_fp_strap(struct drm_device *dev, struct nvbios *bios)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * The fp strap is normally dictated by the "User Strap" in
+ * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
+	 * Internal_Flags struct at 0x48 is set, the user strap gets overridden
+ * by the PCI subsystem ID during POST, but not before the previous user
+ * strap has been committed to CR58 for CR57=0xf on head A, which may be
+ * read and used instead
+ */
+
+ if (bios->major_version < 5 && bios->data[0x48] & 0x4)
+ return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+
+ if (dev_priv->card_type >= NV_50)
+ return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+ else
+ return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+}
+
+static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
+{
+ uint8_t *fptable;
+ uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
+ int ret, ofs, fpstrapping;
+ struct lvdstableheader lth;
+
+ if (bios->fp.fptablepointer == 0x0) {
+ /* Apple cards don't have the fp table; the laptops use DDC */
+ /* The table is also missing on some x86 IGPs */
+#ifndef __powerpc__
+ NV_ERROR(dev, "Pointer to flat panel table invalid\n");
+#endif
+ bios->pub.digital_min_front_porch = 0x4b;
+ return 0;
+ }
+
+ fptable = &bios->data[bios->fp.fptablepointer];
+ fptable_ver = fptable[0];
+
+ switch (fptable_ver) {
+ /*
+ * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
+ * version field, and miss one of the spread spectrum/PWM bytes.
+ * This could affect early GF2Go parts (not seen any appropriate ROMs
+ * though). Here we assume that a version of 0x05 matches this case
+ * (combining with a BMP version check would be better), as the
+ * common case for the panel type field is 0x0005, and that is in
+ * fact what we are reading the first byte of.
+ */
+ case 0x05: /* some NV10, 11, 15, 16 */
+ recordlen = 42;
+ ofs = -1;
+ break;
+ case 0x10: /* some NV15/16, and NV11+ */
+ recordlen = 44;
+ ofs = 0;
+ break;
+ case 0x20: /* NV40+ */
+ headerlen = fptable[1];
+ recordlen = fptable[2];
+ fpentries = fptable[3];
+ /*
+ * fptable[4] is the minimum
+ * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
+ */
+ bios->pub.digital_min_front_porch = fptable[4];
+ ofs = -7;
+ break;
+ default:
+ NV_ERROR(dev,
+ "FP table revision %d.%d not currently supported\n",
+ fptable_ver >> 4, fptable_ver & 0xf);
+ return -ENOSYS;
+ }
+
+ if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
+ return 0;
+
+ ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+ if (ret)
+ return ret;
+
+ if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
+ bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
+ lth.headerlen + 1;
+ bios->fp.xlatwidth = lth.recordlen;
+ }
+ if (bios->fp.fpxlatetableptr == 0x0) {
+ NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
+ return -EINVAL;
+ }
+
+ fpstrapping = get_fp_strap(dev, bios);
+
+ fpindex = bios->data[bios->fp.fpxlatetableptr +
+ fpstrapping * bios->fp.xlatwidth];
+
+ if (fpindex > fpentries) {
+ NV_ERROR(dev, "Bad flat panel table index\n");
+ return -ENOENT;
+ }
+
+ /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
+ if (lth.lvds_ver > 0x10)
+ bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+
+ /*
+ * If either the strap or xlated fpindex value are 0xf there is no
+	 * panel using a strap-derived bios mode present. This condition
+ * includes, but is different from, the DDC panel indicator above
+ */
+ if (fpstrapping == 0xf || fpindex == 0xf)
+ return 0;
+
+ bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
+ recordlen * fpindex + ofs;
+
+ NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
+ ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
+ ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
+ ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
+
+ return 0;
+}
+
+bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
+
+ if (!mode) /* just checking whether we can produce a mode */
+ return bios->fp.mode_ptr;
+
+ memset(mode, 0, sizeof(struct drm_display_mode));
+ /*
+ * For version 1.0 (version in byte 0):
+ * bytes 1-2 are "panel type", including bits on whether Colour/mono,
+ * single/dual link, and type (TFT etc.)
+ * bytes 3-6 are bits per colour in RGBX
+ */
+ mode->clock = ROM16(mode_entry[7]) * 10;
+	/* bytes 9-10 are HActive */
+ mode->hdisplay = ROM16(mode_entry[11]) + 1;
+ /*
+	 * bytes 13-14 are HValid Start
+	 * bytes 15-16 are HValid End
+ */
+ mode->hsync_start = ROM16(mode_entry[17]) + 1;
+ mode->hsync_end = ROM16(mode_entry[19]) + 1;
+ mode->htotal = ROM16(mode_entry[21]) + 1;
+ /* bytes 23-24, 27-30 similarly, but vertical */
+ mode->vdisplay = ROM16(mode_entry[25]) + 1;
+ mode->vsync_start = ROM16(mode_entry[31]) + 1;
+ mode->vsync_end = ROM16(mode_entry[33]) + 1;
+ mode->vtotal = ROM16(mode_entry[35]) + 1;
+ mode->flags |= (mode_entry[37] & 0x10) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (mode_entry[37] & 0x1) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ /*
+ * bytes 38-39 relate to spread spectrum settings
+ * bytes 40-43 are something to do with PWM
+ */
+
+ mode->status = MODE_OK;
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ return bios->fp.mode_ptr;
+}
+
+int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
+{
+ /*
+ * The LVDS table header is (mostly) described in
+ * parse_lvds_manufacturer_table_header(): the BIT header additionally
+	 * contains the dual-link transition pxclk (in 10 kHz units) at byte 5 - if
+ * straps are not being used for the panel, this specifies the frequency
+ * at which modes should be set up in the dual link style.
+ *
+ * Following the header, the BMP (ver 0xa) table has several records,
+	 * indexed by a separate xlat table, indexed in turn by the fp strap in
+	 * EXTDEV_BOOT. Each record has a config byte, followed by 6 script
+ * numbers for use by INIT_SUB which controlled panel init and power,
+ * and finally a dword of ms to sleep between power off and on
+ * operations.
+ *
+ * In the BIT versions, the table following the header serves as an
+ * integrated config and xlat table: the records in the table are
+ * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
+ * two bytes - the first as a config byte, the second for indexing the
+ * fp mode table pointed to by the BIT 'D' table
+ *
+ * DDC is not used until after card init, so selecting the correct table
+ * entry and setting the dual link flag for EDID equipped panels,
+ * requiring tests against the native-mode pixel clock, cannot be done
+ * until later, when this function should be called with non-zero pxclk
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
+ struct lvdstableheader lth;
+ uint16_t lvdsofs;
+ int ret, chip_version = bios->pub.chip_version;
+
+ ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+ if (ret)
+ return ret;
+
+ switch (lth.lvds_ver) {
+ case 0x0a: /* pre NV40 */
+ lvdsmanufacturerindex = bios->data[
+ bios->fp.fpxlatemanufacturertableptr +
+ fpstrapping];
+
+ /* we're done if this isn't the EDID panel case */
+ if (!pxclk)
+ break;
+
+ if (chip_version < 0x25) {
+ /* nv17 behaviour
+ *
+ * It seems the old style lvds script pointer is reused
+ * to select 18/24 bit colour depth for EDID panels.
+ */
+ lvdsmanufacturerindex =
+ (bios->legacy.lvds_single_a_script_ptr & 1) ?
+ 2 : 0;
+ if (pxclk >= bios->fp.duallink_transition_clk)
+ lvdsmanufacturerindex++;
+ } else if (chip_version < 0x30) {
+ /* nv28 behaviour (off-chip encoder)
+ *
+ * nv28 does a complex dance of first using byte 121 of
+ * the EDID to choose the lvdsmanufacturerindex, then
+ * later attempting to match the EDID manufacturer and
+ * product IDs in a table (signature 'pidt' (panel id
+ * table?)), setting an lvdsmanufacturerindex of 0 and
+ * an fp strap of the match index (or 0xf if none)
+ */
+ lvdsmanufacturerindex = 0;
+ } else {
+ /* nv31, nv34 behaviour */
+ lvdsmanufacturerindex = 0;
+ if (pxclk >= bios->fp.duallink_transition_clk)
+ lvdsmanufacturerindex = 2;
+ if (pxclk >= 140000)
+ lvdsmanufacturerindex = 3;
+ }
+
+ /*
+ * nvidia set the high nibble of (cr57=f, cr58) to
+ * lvdsmanufacturerindex in this case; we don't
+ */
+ break;
+ case 0x30: /* NV4x */
+ case 0x40: /* G80/G90 */
+ lvdsmanufacturerindex = fpstrapping;
+ break;
+ default:
+ NV_ERROR(dev, "LVDS table revision not currently supported\n");
+ return -ENOSYS;
+ }
+
+ lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
+ switch (lth.lvds_ver) {
+ case 0x0a:
+ bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
+ bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
+ bios->fp.dual_link = bios->data[lvdsofs] & 4;
+ bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
+ *if_is_24bit = bios->data[lvdsofs] & 16;
+ break;
+ case 0x30:
+ /*
+ * My money would be on there being a 24 bit interface bit in
+ * this table, but I have no example of a laptop bios with a
+ * 24 bit panel to confirm that. Hence we shout loudly if any
+ * bit other than bit 0 is set (I've not even seen bit 1)
+ */
+ if (bios->data[lvdsofs] > 1)
+ NV_ERROR(dev,
+ "You have a very unusual laptop display; please report it\n");
+ /*
+ * No sign of the "power off for reset" or "reset for panel
+ * on" bits, but it's safer to assume we should
+ */
+ bios->fp.power_off_for_reset = true;
+ bios->fp.reset_after_pclk_change = true;
+ /*
+		 * It's OK that lvdsofs is wrong for the nv4x EDID case;
+		 * dual_link is overwritten, and BITbit1 isn't used
+ */
+ bios->fp.dual_link = bios->data[lvdsofs] & 1;
+ bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
+ bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+ break;
+ case 0x40:
+ bios->fp.dual_link = bios->data[lvdsofs] & 1;
+ bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
+ bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+ bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+ break;
+ }
+
+ /* set dual_link flag for EDID case */
+ if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
+ bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
+
+ *dl = bios->fp.dual_link;
+
+ return 0;
+}
+
+static uint8_t *
+bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
+ uint16_t record, int record_len, int record_nr)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint32_t entry;
+ uint16_t table;
+ int i, v;
+
+ for (i = 0; i < record_nr; i++, record += record_len) {
+ table = ROM16(bios->data[record]);
+ if (!table)
+ continue;
+ entry = ROM32(bios->data[table]);
+
+ v = (entry & 0x000f0000) >> 16;
+ if (!(v & dcbent->or))
+ continue;
+
+ v = (entry & 0x000000f0) >> 4;
+ if (v != dcbent->location)
+ continue;
+
+ v = (entry & 0x0000000f);
+ if (v != dcbent->type)
+ continue;
+
+ return &bios->data[table];
+ }
+
+ return NULL;
+}
+
+void *
+nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ int *length)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t *table;
+
+ if (!bios->display.dp_table_ptr) {
+ NV_ERROR(dev, "No pointer to DisplayPort table\n");
+ return NULL;
+ }
+ table = &bios->data[bios->display.dp_table_ptr];
+
+ if (table[0] != 0x21) {
+ NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
+ table[0]);
+ return NULL;
+ }
+
+ *length = table[4];
+ return bios_output_config_match(dev, dcbent,
+ bios->display.dp_table_ptr + table[1],
+ table[2], table[3]);
+}
+
+int
+nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ uint32_t sub, int pxclk)
+{
+ /*
+ * The display script table is located by the BIT 'U' table.
+ *
+ * It contains an array of pointers to various tables describing
+ * a particular output type. The first 32-bits of the output
+ * tables contains similar information to a DCB entry, and is
+ * used to decide whether that particular table is suitable for
+ * the output you want to access.
+ *
+ * The "record header length" field here seems to indicate the
+ * offset of the first configuration entry in the output tables.
+ * This is 10 on most cards I've seen, but 12 has been witnessed
+ * on DP cards, and there's another script pointer within the
+ * header.
+ *
+ * offset + 0 ( 8 bits): version
+ * offset + 1 ( 8 bits): header length
+ * offset + 2 ( 8 bits): record length
+ * offset + 3 ( 8 bits): number of records
+ * offset + 4 ( 8 bits): record header length
+ * offset + 5 (16 bits): pointer to first output script table
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct init_exec iexec = {true, false};
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t *table = &bios->data[bios->display.script_table_ptr];
+ uint8_t *otable = NULL;
+ uint16_t script;
+ int i = 0;
+
+ if (!bios->display.script_table_ptr) {
+ NV_ERROR(dev, "No pointer to output script table\n");
+ return 1;
+ }
+
+ /*
+	 * Nothing useful has been found in any of the pre-2.0 tables I've
+	 * seen, so until something is, we really don't need to care.
+ */
+ if (table[0] < 0x20)
+ return 1;
+
+ if (table[0] != 0x20 && table[0] != 0x21) {
+ NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
+ table[0]);
+ return 1;
+ }
+
+ /*
+ * The output script tables describing a particular output type
+ * look as follows:
+ *
+ * offset + 0 (32 bits): output this table matches (hash of DCB)
+ * offset + 4 ( 8 bits): unknown
+ * offset + 5 ( 8 bits): number of configurations
+ * offset + 6 (16 bits): pointer to some script
+ * offset + 8 (16 bits): pointer to some script
+ *
+ * headerlen == 10
+ * offset + 10 : configuration 0
+ *
+ * headerlen == 12
+ * offset + 10 : pointer to some script
+ * offset + 12 : configuration 0
+ *
+ * Each config entry is as follows:
+ *
+ * offset + 0 (16 bits): unknown, assumed to be a match value
+ * offset + 2 (16 bits): pointer to script table (clock set?)
+ * offset + 4 (16 bits): pointer to script table (reset?)
+ *
+ * There doesn't appear to be a count value to say how many
+	 * entries exist in each script table; instead, a 0 value in
+ * the first 16-bit word seems to indicate both the end of the
+ * list and the default entry. The second 16-bit word in the
+ * script tables is a pointer to the script to execute.
+ */
+
+ NV_DEBUG(dev, "Searching for output entry for %d %d %d\n",
+ dcbent->type, dcbent->location, dcbent->or);
+ otable = bios_output_config_match(dev, dcbent, table[1] +
+ bios->display.script_table_ptr,
+ table[2], table[3]);
+ if (!otable) {
+ NV_ERROR(dev, "Couldn't find matching output script table\n");
+ return 1;
+ }
+
+ if (pxclk < -2 || pxclk > 0) {
+ /* Try to find matching script table entry */
+ for (i = 0; i < otable[5]; i++) {
+ if (ROM16(otable[table[4] + i*6]) == sub)
+ break;
+ }
+
+ if (i == otable[5]) {
+ NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
+ "using first\n",
+ sub, dcbent->type, dcbent->or);
+ i = 0;
+ }
+ }
+
+ bios->display.output = dcbent;
+
+ if (pxclk == 0) {
+ script = ROM16(otable[6]);
+ if (!script) {
+ NV_DEBUG(dev, "output script 0 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk == -1) {
+ script = ROM16(otable[8]);
+ if (!script) {
+ NV_DEBUG(dev, "output script 1 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk == -2) {
+ if (table[4] >= 12)
+ script = ROM16(otable[10]);
+ else
+ script = 0;
+ if (!script) {
+ NV_DEBUG(dev, "output script 2 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk > 0) {
+ script = ROM16(otable[table[4] + i*6 + 2]);
+ if (script)
+ script = clkcmptable(bios, script, pxclk);
+ if (!script) {
+ NV_ERROR(dev, "clock script 0 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk < 0) {
+ script = ROM16(otable[table[4] + i*6 + 4]);
+ if (script)
+ script = clkcmptable(bios, script, -pxclk);
+ if (!script) {
+ NV_DEBUG(dev, "clock script 1 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+ parse_init_table(bios, script, &iexec);
+ }
+
+ return 0;
+}
+
+
+int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
+{
+ /*
+ * the pxclk parameter is in kHz
+ *
+ * This runs the TMDS regs setting code found on BIT bios cards
+ *
+ * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
+ * ffs(or) == 3, use the second.
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int cv = bios->pub.chip_version;
+ uint16_t clktable = 0, scriptptr;
+ uint32_t sel_clk_binding, sel_clk;
+
+ /* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
+ if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
+ dcbent->location != DCB_LOC_ON_CHIP)
+ return 0;
+
+ switch (ffs(dcbent->or)) {
+ case 1:
+ clktable = bios->tmds.output0_script_ptr;
+ break;
+ case 2:
+ case 3:
+ clktable = bios->tmds.output1_script_ptr;
+ break;
+ }
+
+ if (!clktable) {
+ NV_ERROR(dev, "Pixel clock comparison table not found\n");
+ return -EINVAL;
+ }
+
+ scriptptr = clkcmptable(bios, clktable, pxclk);
+
+ if (!scriptptr) {
+ NV_ERROR(dev, "TMDS output init script not found\n");
+ return -ENOENT;
+ }
+
+ /* don't let script change pll->head binding */
+ sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
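+	/* 165MHz is the single-link TMDS limit; above it we go dual-link */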
+ run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
+ sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+
+ return 0;
+}
+
+int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
+{
+ /*
+ * PLL limits table
+ *
+ * Version 0x10: NV30, NV31
+ * One byte header (version), one record of 24 bytes
+ * Version 0x11: NV36 - Not implemented
+ * Seems to have same record style as 0x10, but 3 records rather than 1
+ * Version 0x20: Found on Geforce 6 cards
+ * Trivial 4 byte BIT header. 31 (0x1f) byte record length
+ * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
+ * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
+ * length in general, some (integrated) have an extra configuration byte
+ * Version 0x30: Found on Geforce 8, separates the register mapping
+ * from the limits tables.
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int cv = bios->pub.chip_version, pllindex = 0;
+ uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
+ uint32_t crystal_strap_mask, crystal_straps;
+
+ if (!bios->pll_limit_tbl_ptr) {
+ if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+ cv >= 0x40) {
+ NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
+ return -EINVAL;
+ }
+ } else
+ pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
+
+ crystal_strap_mask = 1 << 6;
+ /* open coded dev->twoHeads test */
+ if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
+ crystal_strap_mask |= 1 << 22;
+ crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
+ crystal_strap_mask;
+
+ switch (pll_lim_ver) {
+ /*
+ * We use version 0 to indicate a pre limit table bios (single stage
+ * pll) and load the hard coded limits instead.
+ */
+ case 0:
+ break;
+ case 0x10:
+ case 0x11:
+ /*
+ * Strictly v0x11 has 3 entries, but the last two don't seem
+ * to get used.
+ */
+ headerlen = 1;
+ recordlen = 0x18;
+ entries = 1;
+ pllindex = 0;
+ break;
+ case 0x20:
+ case 0x21:
+ case 0x30:
+ case 0x40:
+ headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
+ recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
+ entries = bios->data[bios->pll_limit_tbl_ptr + 3];
+ break;
+ default:
+ NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
+ "supported\n", pll_lim_ver);
+ return -ENOSYS;
+ }
+
+ /* initialize all members to zero */
+ memset(pll_lim, 0, sizeof(struct pll_lims));
+
+ if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
+ uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
+
+ pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
+ pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
+ pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
+ pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
+ pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
+ pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
+ pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
+
+ /* these values taken from nv30/31/36 */
+ pll_lim->vco1.min_n = 0x1;
+ if (cv == 0x36)
+ pll_lim->vco1.min_n = 0x5;
+ pll_lim->vco1.max_n = 0xff;
+ pll_lim->vco1.min_m = 0x1;
+ pll_lim->vco1.max_m = 0xd;
+ pll_lim->vco2.min_n = 0x4;
+ /*
+ * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
+ * table version (apart from nv35)), N2 is compared to
+ * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
+ * save a comparison
+ */
+ pll_lim->vco2.max_n = 0x28;
+ if (cv == 0x30 || cv == 0x35)
+ /* only 5 bits available for N2 on nv30/35 */
+ pll_lim->vco2.max_n = 0x1f;
+ pll_lim->vco2.min_m = 0x1;
+ pll_lim->vco2.max_m = 0x4;
+ pll_lim->max_log2p = 0x7;
+ pll_lim->max_usable_log2p = 0x6;
+ } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
+ uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
+ uint32_t reg = 0; /* default match */
+ uint8_t *pll_rec;
+ int i;
+
+ /*
+		 * First entry is the default match if nothing better is found;
+		 * warn if its reg field is nonzero
+ */
+ if (ROM32(bios->data[plloffs]))
+ NV_WARN(dev, "Default PLL limit entry has non-zero "
+ "register field\n");
+
+ if (limit_match > MAX_PLL_TYPES)
+ /* we've been passed a reg as the match */
+ reg = limit_match;
+ else /* limit match is a pll type */
+ for (i = 1; i < entries && !reg; i++) {
+ uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
+
+ if (limit_match == NVPLL &&
+ (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
+ reg = cmpreg;
+ if (limit_match == MPLL &&
+ (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
+ reg = cmpreg;
+ if (limit_match == VPLL1 &&
+ (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
+ reg = cmpreg;
+ if (limit_match == VPLL2 &&
+ (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
+ reg = cmpreg;
+ }
+
+ for (i = 1; i < entries; i++)
+ if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+ pllindex = i;
+ break;
+ }
+
+ pll_rec = &bios->data[plloffs + recordlen * pllindex];
+
+ BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
+ pllindex ? reg : 0);
+
+ /*
+		 * Frequencies are stored in the tables in MHz; kHz are more
+		 * useful, so we convert.
+ */
+
+ /* What output frequencies can each VCO generate? */
+ pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
+ pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
+ pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
+ pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
+
+		/* What input frequencies do they accept (past the m-divider)? */
+ pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
+ pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
+ pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
+ pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
+
+ /* What values are accepted as multiplier and divider? */
+ pll_lim->vco1.min_n = pll_rec[20];
+ pll_lim->vco1.max_n = pll_rec[21];
+ pll_lim->vco1.min_m = pll_rec[22];
+ pll_lim->vco1.max_m = pll_rec[23];
+ pll_lim->vco2.min_n = pll_rec[24];
+ pll_lim->vco2.max_n = pll_rec[25];
+ pll_lim->vco2.min_m = pll_rec[26];
+ pll_lim->vco2.max_m = pll_rec[27];
+
+ pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
+ if (pll_lim->max_log2p > 0x7)
+ /* pll decoding in nv_hw.c assumes never > 7 */
+ NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
+ pll_lim->max_log2p);
+ if (cv < 0x60)
+ pll_lim->max_usable_log2p = 0x6;
+ pll_lim->log2p_bias = pll_rec[30];
+
+ if (recordlen > 0x22)
+ pll_lim->refclk = ROM32(pll_rec[31]);
+
+ if (recordlen > 0x23 && pll_rec[35])
+ NV_WARN(dev,
+ "Bits set in PLL configuration byte (%x)\n",
+ pll_rec[35]);
+
+ /* C51 special not seen elsewhere */
+ if (cv == 0x51 && !pll_lim->refclk) {
+ uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
+
+ if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
+ ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+ if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
+ pll_lim->refclk = 200000;
+ else
+ pll_lim->refclk = 25000;
+ }
+ }
+ } else if (pll_lim_ver == 0x30) { /* ver 0x30 */
+ uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
+ uint8_t *record = NULL;
+ int i;
+
+ BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
+ limit_match);
+
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ if (ROM32(entry[3]) == limit_match) {
+ record = &bios->data[ROM16(entry[1])];
+ break;
+ }
+ }
+
+ if (!record) {
+ NV_ERROR(dev, "Register 0x%08x not found in PLL "
+				"limits table\n", limit_match);
+ return -ENOENT;
+ }
+
+ pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
+ pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
+ pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
+ pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
+ pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
+ pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
+ pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
+ pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
+ pll_lim->vco1.min_n = record[16];
+ pll_lim->vco1.max_n = record[17];
+ pll_lim->vco1.min_m = record[18];
+ pll_lim->vco1.max_m = record[19];
+ pll_lim->vco2.min_n = record[20];
+ pll_lim->vco2.max_n = record[21];
+ pll_lim->vco2.min_m = record[22];
+ pll_lim->vco2.max_m = record[23];
+ pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
+ pll_lim->log2p_bias = record[27];
+ pll_lim->refclk = ROM32(record[28]);
+ } else if (pll_lim_ver) { /* ver 0x40 */
+ uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
+ uint8_t *record = NULL;
+ int i;
+
+ BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
+ limit_match);
+
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ if (ROM32(entry[3]) == limit_match) {
+ record = &bios->data[ROM16(entry[1])];
+ break;
+ }
+ }
+
+ if (!record) {
+ NV_ERROR(dev, "Register 0x%08x not found in PLL "
+				"limits table\n", limit_match);
+ return -ENOENT;
+ }
+
+ pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
+ pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
+ pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
+ pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
+ pll_lim->vco1.min_m = record[8];
+ pll_lim->vco1.max_m = record[9];
+ pll_lim->vco1.min_n = record[10];
+ pll_lim->vco1.max_n = record[11];
+ pll_lim->min_p = record[12];
+ pll_lim->max_p = record[13];
+ /* where did this go to?? */
+ if (limit_match == 0x00614100 || limit_match == 0x00614900)
+ pll_lim->refclk = 27000;
+ else
+ pll_lim->refclk = 100000;
+ }
+
+ /*
+ * By now any valid limit table ought to have set a max frequency for
+ * vco1, so if it's zero it's either a pre limit table bios, or one
+ * with an empty limit table (seen on nv18)
+ */
+ if (!pll_lim->vco1.maxfreq) {
+ pll_lim->vco1.minfreq = bios->fminvco;
+ pll_lim->vco1.maxfreq = bios->fmaxvco;
+ pll_lim->vco1.min_inputfreq = 0;
+ pll_lim->vco1.max_inputfreq = INT_MAX;
+ pll_lim->vco1.min_n = 0x1;
+ pll_lim->vco1.max_n = 0xff;
+ pll_lim->vco1.min_m = 0x1;
+ if (crystal_straps == 0) {
+ /* nv05 does this, nv11 doesn't, nv10 unknown */
+ if (cv < 0x11)
+ pll_lim->vco1.min_m = 0x7;
+ pll_lim->vco1.max_m = 0xd;
+ } else {
+ if (cv < 0x11)
+ pll_lim->vco1.min_m = 0x8;
+ pll_lim->vco1.max_m = 0xe;
+ }
+ if (cv < 0x17 || cv == 0x1a || cv == 0x20)
+ pll_lim->max_log2p = 4;
+ else
+ pll_lim->max_log2p = 5;
+ pll_lim->max_usable_log2p = pll_lim->max_log2p;
+ }
+
+ if (!pll_lim->refclk)
+ switch (crystal_straps) {
+ case 0:
+ pll_lim->refclk = 13500;
+ break;
+ case (1 << 6):
+ pll_lim->refclk = 14318;
+ break;
+ case (1 << 22):
+ pll_lim->refclk = 27000;
+ break;
+ case (1 << 22 | 1 << 6):
+ pll_lim->refclk = 25000;
+ break;
+ }
+
+#if 0 /* for easy debugging */
+ ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+ ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+ ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+ ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+
+ ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+ ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+ ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+ ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+
+ ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+ ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+ ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+ ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+ ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+ ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+ ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+ ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+
+ ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
+ ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+
+ ErrorF("pll.refclk: %d\n", pll_lim->refclk);
+#endif
+
+ return 0;
+}
+
+static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
+{
+ /*
+ * offset + 0 (8 bits): Micro version
+ * offset + 1 (8 bits): Minor version
+ * offset + 2 (8 bits): Chip version
+ * offset + 3 (8 bits): Major version
+ */
+
+ bios->major_version = bios->data[offset + 3];
+ bios->pub.chip_version = bios->data[offset + 2];
+ NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
+ bios->data[offset + 3], bios->data[offset + 2],
+ bios->data[offset + 1], bios->data[offset]);
+}
+
+static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
+{
+ /*
+ * Parses the init table segment for pointers used in script execution.
+ *
+ * offset + 0 (16 bits): init script tables pointer
+ * offset + 2 (16 bits): macro index table pointer
+ * offset + 4 (16 bits): macro table pointer
+ * offset + 6 (16 bits): condition table pointer
+ * offset + 8 (16 bits): io condition table pointer
+ * offset + 10 (16 bits): io flag condition table pointer
+ * offset + 12 (16 bits): init function table pointer
+ */
+
+ bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
+ bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
+ bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
+ bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
+ bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
+ bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
+ bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
+}
+
+static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the load detect values for g80 cards.
+ *
+ * offset + 0 (16 bits): loadval table pointer
+ */
+
+ uint16_t load_table_ptr;
+ uint8_t version, headerlen, entrylen, num_entries;
+
+ if (bitentry->length != 3) {
+ NV_ERROR(dev, "Do not understand BIT A table\n");
+ return -EINVAL;
+ }
+
+ load_table_ptr = ROM16(bios->data[bitentry->offset]);
+
+ if (load_table_ptr == 0x0) {
+ NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+ return -EINVAL;
+ }
+
+ version = bios->data[load_table_ptr];
+
+ if (version != 0x10) {
+ NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
+ version >> 4, version & 0xF);
+ return -ENOSYS;
+ }
+
+ headerlen = bios->data[load_table_ptr + 1];
+ entrylen = bios->data[load_table_ptr + 2];
+ num_entries = bios->data[load_table_ptr + 3];
+
+ if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
+ NV_ERROR(dev, "Do not understand BIT loadval table\n");
+ return -EINVAL;
+ }
+
+ /* First entry is normal dac, 2nd tv-out perhaps? */
+ bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+
+ return 0;
+}
+
+static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * offset + 8 (16 bits): PLL limits table pointer
+ *
+ * There's more in here, but that's unknown.
+ */
+
+ if (bitentry->length < 10) {
+ NV_ERROR(dev, "Do not understand BIT C table\n");
+ return -EINVAL;
+ }
+
+ bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
+
+ return 0;
+}
+
+static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the flat panel table segment that the bit entry points to.
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): ??? table pointer - seems to have 18 byte
+ * records beginning with a freq.
+ * offset + 2 (16 bits): mode table pointer
+ */
+
+ if (bitentry->length != 4) {
+ NV_ERROR(dev, "Do not understand BIT display table\n");
+ return -EINVAL;
+ }
+
+ bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
+
+ return 0;
+}
+
+static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the init table segment that the bit entry points to.
+ *
+ * See parse_script_table_pointers for layout
+ */
+
+ if (bitentry->length < 14) {
+ NV_ERROR(dev, "Do not understand init table\n");
+ return -EINVAL;
+ }
+
+ parse_script_table_pointers(bios, bitentry->offset);
+
+ if (bitentry->length >= 16)
+ bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
+ if (bitentry->length >= 18)
+ bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
+
+ return 0;
+}
+
+static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * BIT 'i' (info?) table
+ *
+ * offset + 0 (32 bits): BIOS version dword (as in B table)
+ * offset + 5 (8 bits): BIOS feature byte (same as for BMP?)
+ * offset + 13 (16 bits): pointer to table containing DAC load
+ * detection comparison values
+ *
+ * There's other things in the table, purpose unknown
+ */
+
+ uint16_t daccmpoffset;
+ uint8_t dacver, dacheaderlen;
+
+ if (bitentry->length < 6) {
+ NV_ERROR(dev, "BIT i table too short for needed information\n");
+ return -EINVAL;
+ }
+
+ parse_bios_version(dev, bios, bitentry->offset);
+
+ /*
+	 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
+	 * Quadro identity crisis); other bits possibly as for the BMP feature byte
+ */
+ bios->feature_byte = bios->data[bitentry->offset + 5];
+ bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
+
+ if (bitentry->length < 15) {
+ NV_WARN(dev, "BIT i table not long enough for DAC load "
+ "detection comparison table\n");
+ return -EINVAL;
+ }
+
+ daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
+
+ /* doesn't exist on g80 */
+ if (!daccmpoffset)
+ return 0;
+
+ /*
+ * The first value in the table, following the header, is the
+ * comparison value, the second entry is a comparison value for
+ * TV load detection.
+ */
+
+ dacver = bios->data[daccmpoffset];
+ dacheaderlen = bios->data[daccmpoffset + 1];
+
+ if (dacver != 0x00 && dacver != 0x10) {
+ NV_WARN(dev, "DAC load detection comparison table version "
+ "%d.%d not known\n", dacver >> 4, dacver & 0xf);
+ return -ENOSYS;
+ }
+
+ bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+ bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+
+ return 0;
+}
+
+static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the LVDS table segment that the bit entry points to.
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): LVDS strap xlate table pointer
+ */
+
+ if (bitentry->length != 2) {
+ NV_ERROR(dev, "Do not understand BIT LVDS table\n");
+ return -EINVAL;
+ }
+
+ /*
+ * No idea if it's still called the LVDS manufacturer table, but
+ * the concept's close enough.
+ */
+ bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
+
+ return 0;
+}
+
+static int
+parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ /*
+ * offset + 2 (8 bits): number of options in an
+ * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
+ * offset + 3 (16 bits): pointer to strap xlate table for RAM
+ * restrict option selection
+ *
+ * There's a bunch of bits in this table other than the RAM restrict
+ * stuff that we don't use - their use currently unknown
+ */
+
+ uint16_t rr_strap_xlat;
+ uint8_t rr_group_count;
+ int i;
+
+ /*
+ * Older bios versions don't have a sufficiently long table for
+ * what we want
+ */
+ if (bitentry->length < 0x5)
+ return 0;
+
+ if (bitentry->id[1] < 2) {
+ rr_group_count = bios->data[bitentry->offset + 2];
+ rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]);
+ } else {
+ rr_group_count = bios->data[bitentry->offset + 0];
+ rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]);
+ }
+
+ /* adjust length of INIT_87 */
+ for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
+ itbl_entry[i].length += rr_group_count * 4;
+
+ /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
+ for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
+ itbl_entry[i].length_multiplier = rr_group_count * 4;
+
+ init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
+ bios->ram_restrict_tbl_ptr = rr_strap_xlat;
+
+ return 0;
+}
+
+static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the pointer to the TMDS table
+ *
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): TMDS table pointer
+ *
+ * The TMDS table is typically found just before the DCB table, with a
+ * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
+ * length?)
+ *
+ * At offset +7 is a pointer to a script, which I don't know how to
+ * run yet.
+ * At offset +9 is a pointer to another script, likewise
+ * Offset +11 has a pointer to a table where the first word is a pxclk
+ * frequency and the second word a pointer to a script, which should be
+ * run if the comparison pxclk frequency is less than the pxclk desired.
+ * This repeats for decreasing comparison frequencies
+ * Offset +13 has a pointer to a similar table
+ * The selection of table (and possibly +7/+9 script) is dictated by
+ * "or" from the DCB.
+ */
+
+ uint16_t tmdstableptr, script1, script2;
+
+ if (bitentry->length != 2) {
+ NV_ERROR(dev, "Do not understand BIT TMDS table\n");
+ return -EINVAL;
+ }
+
+ tmdstableptr = ROM16(bios->data[bitentry->offset]);
+
+ if (tmdstableptr == 0x0) {
+ NV_ERROR(dev, "Pointer to TMDS table invalid\n");
+ return -EINVAL;
+ }
+
+ /* nv50+ has v2.0, but we don't parse it atm */
+ if (bios->data[tmdstableptr] != 0x11) {
+ NV_WARN(dev,
+ "TMDS table revision %d.%d not currently supported\n",
+ bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+ return -ENOSYS;
+ }
+
+ /*
+ * These two scripts are odd: they don't seem to get run even when
+ * they are not stubbed.
+ */
+ script1 = ROM16(bios->data[tmdstableptr + 7]);
+ script2 = ROM16(bios->data[tmdstableptr + 9]);
+ if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
+ NV_WARN(dev, "TMDS table script pointers not stubbed\n");
+
+ bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
+ bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
+
+ return 0;
+}
+
+static int
+parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ /*
+ * Parses the pointer to the G80 output script tables
+ *
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): output script table pointer
+ */
+
+ uint16_t outputscripttableptr;
+
+ if (bitentry->length != 3) {
+ NV_ERROR(dev, "Do not understand BIT U table\n");
+ return -EINVAL;
+ }
+
+ outputscripttableptr = ROM16(bios->data[bitentry->offset]);
+ bios->display.script_table_ptr = outputscripttableptr;
+ return 0;
+}
+
+static int
+parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
+ return 0;
+}
+
+struct bit_table {
+ const char id;
+ int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+};
+
+#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+
+static int
+parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
+ struct bit_table *table)
+{
+ struct drm_device *dev = bios->dev;
+ uint8_t maxentries = bios->data[bitoffset + 4];
+ int i, offset;
+ struct bit_entry bitentry;
+
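+	/*
+	 * BIT entries are 6 bytes each, starting 6 bytes into the BIT
+	 * structure: id char, a second id/version byte, then a 16-bit length
+	 * and a 16-bit data offset, both little endian (hence the ROM16 reads).
+	 */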
+ for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
+ bitentry.id[0] = bios->data[offset];
+
+ if (bitentry.id[0] != table->id)
+ continue;
+
+ bitentry.id[1] = bios->data[offset + 1];
+ bitentry.length = ROM16(bios->data[offset + 2]);
+ bitentry.offset = ROM16(bios->data[offset + 4]);
+
+ return table->parse_fn(dev, bios, &bitentry);
+ }
+
+ NV_INFO(dev, "BIT table '%c' not found\n", table->id);
+ return -ENOSYS;
+}
+
+static int
+parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
+{
+ int ret;
+
+ /*
+ * The only restriction on parsing order currently is having 'i' first
+ * for use of bios->*_version or bios->feature_byte while parsing;
+ * functions shouldn't be actually *doing* anything apart from pulling
+ * data from the image into the bios struct, thus no interdependencies
+ */
+ ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
+ if (ret) /* info? */
+ return ret;
+ if (bios->major_version >= 0x60) /* g80+ */
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
+ ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
+ if (ret)
+ return ret;
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
+ ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
+ if (ret)
+ return ret;
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
+
+ return 0;
+}
+
+static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
+{
+ /*
+ * Parses the BMP structure for useful things, but does not act on them
+ *
+ * offset + 5: BMP major version
+ * offset + 6: BMP minor version
+ * offset + 9: BMP feature byte
+ * offset + 10: BCD encoded BIOS version
+ *
+ * offset + 18: init script table pointer (for bios versions < 5.10h)
+ * offset + 20: extra init script table pointer (for bios
+ * versions < 5.10h)
+ *
+ * offset + 24: memory init table pointer (used on early bios versions)
+ * offset + 26: SDR memory sequencing setup data table
+ * offset + 28: DDR memory sequencing setup data table
+ *
+ * offset + 54: index of I2C CRTC pair to use for CRT output
+ * offset + 55: index of I2C CRTC pair to use for TV output
+ * offset + 56: index of I2C CRTC pair to use for flat panel output
+ * offset + 58: write CRTC index for I2C pair 0
+ * offset + 59: read CRTC index for I2C pair 0
+ * offset + 60: write CRTC index for I2C pair 1
+ * offset + 61: read CRTC index for I2C pair 1
+ *
+ * offset + 67: maximum internal PLL frequency (single stage PLL)
+ * offset + 71: minimum internal PLL frequency (single stage PLL)
+ *
+ * offset + 75: script table pointers, as described in
+ * parse_script_table_pointers
+ *
+ * offset + 89: TMDS single link output A table pointer
+ * offset + 91: TMDS single link output B table pointer
+ * offset + 95: LVDS single link output A table pointer
+ * offset + 105: flat panel timings table pointer
+ * offset + 107: flat panel strapping translation table pointer
+ * offset + 117: LVDS manufacturer panel config table pointer
+ * offset + 119: LVDS manufacturer strapping translation table pointer
+ *
+ * offset + 142: PLL limits table pointer
+ *
+ * offset + 156: minimum pixel clock for LVDS dual link
+ */
+
+ uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
+ uint16_t bmplength;
+ uint16_t legacy_scripts_offset, legacy_i2c_offset;
+
+ /* load needed defaults in case we can't parse this info */
+ bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+ bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+ bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+ bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+ bios->pub.digital_min_front_porch = 0x4b;
+ bios->fmaxvco = 256000;
+ bios->fminvco = 128000;
+ bios->fp.duallink_transition_clk = 90000;
+
+ bmp_version_major = bmp[5];
+ bmp_version_minor = bmp[6];
+
+ NV_TRACE(dev, "BMP version %d.%d\n",
+ bmp_version_major, bmp_version_minor);
+
+ /*
+ * Make sure that 0x36 is blank and can't be mistaken for a DCB
+ * pointer on early versions
+ */
+ if (bmp_version_major < 5)
+ *(uint16_t *)&bios->data[0x36] = 0;
+
+ /*
+ * Seems that the minor version was 1 for all major versions prior
+ * to 5. Version 6 could theoretically exist, but I suspect BIT
+ * happened instead.
+ */
+ if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
+ NV_ERROR(dev, "You have an unsupported BMP version. "
+ "Please send in your bios\n");
+ return -ENOSYS;
+ }
+
+ if (bmp_version_major == 0)
+ /* nothing that's currently useful in this version */
+ return 0;
+ else if (bmp_version_major == 1)
+ bmplength = 44; /* exact for 1.01 */
+ else if (bmp_version_major == 2)
+ bmplength = 48; /* exact for 2.01 */
+ else if (bmp_version_major == 3)
+ bmplength = 54;
+ /* guessed - mem init tables added in this version */
+ else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
+ /* don't know if 5.0 exists... */
+ bmplength = 62;
+	/* guessed - BMP I2C indices added in version 4 */
+ else if (bmp_version_minor < 0x6)
+ bmplength = 67; /* exact for 5.01 */
+ else if (bmp_version_minor < 0x10)
+ bmplength = 75; /* exact for 5.06 */
+ else if (bmp_version_minor == 0x10)
+ bmplength = 89; /* exact for 5.10h */
+ else if (bmp_version_minor < 0x14)
+ bmplength = 118; /* exact for 5.11h */
+ else if (bmp_version_minor < 0x24)
+ /*
+ * Not sure of version where pll limits came in;
+ * certainly exist by 0x24 though.
+ */
+ /* length not exact: this is long enough to get lvds members */
+ bmplength = 123;
+ else if (bmp_version_minor < 0x27)
+ /*
+ * Length not exact: this is long enough to get pll limit
+ * member
+ */
+ bmplength = 144;
+ else
+ /*
+ * Length not exact: this is long enough to get dual link
+ * transition clock.
+ */
+ bmplength = 158;
+
+	/* checksum: the first 8 bytes of the BMP must sum to zero */
+ if (nv_cksum(bmp, 8)) {
+ NV_ERROR(dev, "Bad BMP checksum\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Bit 4 seems to indicate either a mobile bios or a quadro card --
+ * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
+	 * (not nv10gl); bit 5 indicates the flat panel tables are present,
+	 * and bit 6 a tv bios.
+ */
+ bios->feature_byte = bmp[9];
+
+ parse_bios_version(dev, bios, offset + 10);
+
+ if (bmp_version_major < 5 || bmp_version_minor < 0x10)
+ bios->old_style_init = true;
+ legacy_scripts_offset = 18;
+ if (bmp_version_major < 2)
+ legacy_scripts_offset -= 4;
+ bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
+ bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
+
+ if (bmp_version_major > 2) { /* appears in BMP 3 */
+ bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
+ bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
+ bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
+ }
+
+ legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */
+ if (bmplength > 61)
+ legacy_i2c_offset = offset + 54;
+ bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
+ bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
+ bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
+ bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+
+ if (bmplength > 74) {
+ bios->fmaxvco = ROM32(bmp[67]);
+ bios->fminvco = ROM32(bmp[71]);
+ }
+ if (bmplength > 88)
+ parse_script_table_pointers(bios, offset + 75);
+ if (bmplength > 94) {
+ bios->tmds.output0_script_ptr = ROM16(bmp[89]);
+ bios->tmds.output1_script_ptr = ROM16(bmp[91]);
+ /*
+ * Never observed in use with lvds scripts, but is reused for
+ * 18/24 bit panel interface default for EDID equipped panels
+ * (if_is_24bit not set directly to avoid any oscillation).
+ */
+ bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
+ }
+ if (bmplength > 108) {
+ bios->fp.fptablepointer = ROM16(bmp[105]);
+ bios->fp.fpxlatetableptr = ROM16(bmp[107]);
+ bios->fp.xlatwidth = 1;
+ }
+ if (bmplength > 120) {
+ bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
+ bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
+ }
+ if (bmplength > 143)
+ bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+
+ if (bmplength > 157)
+ bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
+
+ return 0;
+}
+
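+/*
+ * Returns the offset of the first occurrence of str within data, or 0
+ * when absent; note that a match at offset 0 would also read as "not
+ * found", which the callers here get away with, as their signatures
+ * cannot occur at the very start of the regions searched.
+ */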
+static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
+{
+ int i, j;
+
+ for (i = 0; i <= (n - len); i++) {
+ for (j = 0; j < len; j++)
+ if (data[i + j] != str[j])
+ break;
+ if (j == len)
+ return i;
+ }
+
+ return 0;
+}
+
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
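+	/*
+	 * DCB 3.0+ I2C tables carry their own header: version, header
+	 * length, entry count and entry length in the first four bytes;
+	 * older tables are headerless with 4-byte entries.
+	 */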
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max index 15)\n", i2ctable[2]);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+	if (index >= i2c_entries) {
+		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+ index, i2ctable[2]);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
+
+static struct dcb_gpio_entry *
+new_gpio_entry(struct nvbios *bios)
+{
+ struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+
+ return &gpio->entry[gpio->entries++];
+}
+
+struct dcb_gpio_entry *
+nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int i;
+
+ for (i = 0; i < bios->bdcb.gpio.entries; i++) {
+ if (bios->bdcb.gpio.entry[i].tag != tag)
+ continue;
+
+ return &bios->bdcb.gpio.entry[i];
+ }
+
+ return NULL;
+}
+
+static void
+parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
+{
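+	/* 16-bit entry: bits 4:0 GPIO line, 10:5 tag, 15:11 flags */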
+ struct dcb_gpio_entry *gpio;
+ uint16_t ent = ROM16(bios->data[offset]);
+ uint8_t line = ent & 0x1f,
+ tag = ent >> 5 & 0x3f,
+ flags = ent >> 11 & 0x1f;
+
+ if (tag == 0x3f)
+ return;
+
+ gpio = new_gpio_entry(bios);
+
+ gpio->tag = tag;
+ gpio->line = line;
+ gpio->invert = flags != 4;
+}
+
+static void
+parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
+{
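+	/* 32-bit entry: bits 4:0 GPIO line, 15:8 tag */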
+ struct dcb_gpio_entry *gpio;
+ uint32_t ent = ROM32(bios->data[offset]);
+ uint8_t line = ent & 0x1f,
+ tag = ent >> 8 & 0xff;
+
+ if (tag == 0xff)
+ return;
+
+ gpio = new_gpio_entry(bios);
+
+ /* Currently unused, we may need more fields parsed at some
+ * point. */
+ gpio->tag = tag;
+ gpio->line = line;
+}
+
+static void
+parse_dcb_gpio_table(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+ uint8_t *gpio_table = &bios->data[gpio_table_ptr];
+ int header_len = gpio_table[1],
+ entries = gpio_table[2],
+ entry_len = gpio_table[3];
+ void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+ int i;
+
+ if (bios->bdcb.version >= 0x40) {
+ if (gpio_table_ptr && entry_len != 4) {
+ NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
+ return;
+ }
+
+ parse_entry = parse_dcb40_gpio_entry;
+
+ } else if (bios->bdcb.version >= 0x30) {
+ if (gpio_table_ptr && entry_len != 2) {
+ NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
+ return;
+ }
+
+ parse_entry = parse_dcb30_gpio_entry;
+
+ } else if (bios->bdcb.version >= 0x22) {
+ /*
+ * DCBs older than v3.0 don't really have a GPIO
+ * table, instead they keep some GPIO info at fixed
+ * locations.
+ */
+ uint16_t dcbptr = ROM16(bios->data[0x36]);
+ uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+
+ if (tvdac_gpio[0] & 1) {
+ struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+ gpio->tag = DCB_GPIO_TVDAC0;
+ gpio->line = tvdac_gpio[1] >> 4;
+ gpio->invert = tvdac_gpio[0] & 2;
+ }
+ }
+
+ if (!gpio_table_ptr)
+ return;
+
+ if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
+ NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
+ entries = DCB_MAX_NUM_GPIO_ENTRIES;
+ }
+
+ for (i = 0; i < entries; i++)
+ parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
+}
+
+struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *dev, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct dcb_connector_table_entry *cte;
+
+ if (index >= bios->bdcb.connector.entries)
+ return NULL;
+
+ cte = &bios->bdcb.connector.entry[index];
+ if (cte->type == 0xff)
+ return NULL;
+
+ return cte;
+}
+
+static void
+parse_dcb_connector_table(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ struct dcb_connector_table *ct = &bios->bdcb.connector;
+ struct dcb_connector_table_entry *cte;
+ uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+ uint8_t *entry;
+ int i;
+
+ if (!bios->bdcb.connector_table_ptr) {
+ NV_DEBUG(dev, "No DCB connector table present\n");
+ return;
+ }
+
+ NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
+ conntab[0], conntab[1], conntab[2], conntab[3]);
+ if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
+ (conntab[3] != 2 && conntab[3] != 4)) {
+ NV_ERROR(dev, " Unknown! Please report.\n");
+ return;
+ }
+
+ ct->entries = conntab[2];
+
+ entry = conntab + conntab[1];
+ cte = &ct->entry[0];
+ for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+ if (conntab[3] == 2)
+ cte->entry = ROM16(entry[0]);
+ else
+ cte->entry = ROM32(entry[0]);
+ cte->type = (cte->entry & 0x000000ff) >> 0;
+ cte->index = (cte->entry & 0x00000f00) >> 8;
+ switch (cte->entry & 0x00033000) {
+ case 0x00001000:
+ cte->gpio_tag = 0x07;
+ break;
+ case 0x00002000:
+ cte->gpio_tag = 0x08;
+ break;
+ case 0x00010000:
+ cte->gpio_tag = 0x51;
+ break;
+ case 0x00020000:
+ cte->gpio_tag = 0x52;
+ break;
+ default:
+ cte->gpio_tag = 0xff;
+ break;
+ }
+
+ if (cte->type == 0xff)
+ continue;
+
+ NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
+ i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+ }
+}
+
+static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+{
+ struct dcb_entry *entry = &dcb->entry[dcb->entries];
+
+ memset(entry, 0, sizeof(struct dcb_entry));
+ entry->index = dcb->entries++;
+
+ return entry;
+}
+
+static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+
+ entry->type = 0;
+ entry->i2c_index = i2c;
+ entry->heads = heads;
+ entry->location = DCB_LOC_ON_CHIP;
+ /* "or" mostly unused in early gen crt modesetting, 0 is fine */
+}
+
+static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+
+ entry->type = 2;
+ entry->i2c_index = LEGACY_I2C_PANEL;
+ entry->heads = twoHeads ? 3 : 1;
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
+ entry->duallink_possible = false; /* SiI164 and co. are single link */
+
+#if 0
+ /*
+ * For dvi-a either crtc probably works, but my card appears to only
+ * support dvi-d. "nvidia" still attempts to program it for dvi-a,
+ * doing the full fp output setup (program 0x6808.. fp dimension regs,
+ * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
+ * the monitor picks up the mode res ok and lights up, but no pixel
+ * data appears, so the board manufacturer probably connected up the
+ * sync lines, but missed the video traces / components
+ *
+ * with this introduction, dvi-a left as an exercise for the reader.
+ */
+ fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
+#endif
+}
+
+static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+
+ entry->type = 1;
+ entry->i2c_index = LEGACY_I2C_TV;
+ entry->heads = twoHeads ? 3 : 1;
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+}
+
+static bool
+parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+ uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+{
+ entry->type = conn & 0xf;
+ entry->i2c_index = (conn >> 4) & 0xf;
+ entry->heads = (conn >> 8) & 0xf;
+ if (bdcb->version >= 0x40)
+ entry->connector = (conn >> 12) & 0xf;
+ entry->bus = (conn >> 16) & 0xf;
+ entry->location = (conn >> 20) & 0x3;
+ entry->or = (conn >> 24) & 0xf;
+ /*
+ * Normal entries consist of a single bit, but dual link has the
+ * next most significant bit set too
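+	 * (e.g. an "or" of 3 or 6 allows dual link; 1, 2 or 4 does not)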
+ */
+ entry->duallink_possible =
+ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
+
+ switch (entry->type) {
+ case OUTPUT_ANALOG:
+ /*
+ * Although the rest of a CRT conf dword is usually
+ * zeros, mac biosen have stuff there so we must mask
+ */
+ entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+ (conf & 0xffff) * 10 :
+ (conf & 0xff) * 10000;
+ break;
+ case OUTPUT_LVDS:
+ {
+ uint32_t mask;
+ if (conf & 0x1)
+ entry->lvdsconf.use_straps_for_mode = true;
+ if (bdcb->version < 0x22) {
+ mask = ~0xd;
+ /*
+ * The laptop in bug 14567 lies and claims to not use
+ * straps when it does, so assume all DCB 2.0 laptops
+ * use straps, until a broken EDID using one is produced
+ */
+ entry->lvdsconf.use_straps_for_mode = true;
+ /*
+ * Both 0x4 and 0x8 show up in v2.0 tables; assume they
+ * mean the same thing (probably wrong, but might work)
+ */
+ if (conf & 0x4 || conf & 0x8)
+ entry->lvdsconf.use_power_scripts = true;
+ } else {
+ mask = ~0x5;
+ if (conf & 0x4)
+ entry->lvdsconf.use_power_scripts = true;
+ }
+ if (conf & mask) {
+ /*
+ * Until we even try to use these on G8x, it's
+ * useless reporting unknown bits. They all are.
+ */
+ if (bdcb->version >= 0x40)
+ break;
+
+ NV_ERROR(dev, "Unknown LVDS configuration bits, "
+ "please report\n");
+ }
+ break;
+ }
+ case OUTPUT_TV:
+ {
+ if (bdcb->version >= 0x30)
+ entry->tvconf.has_component_output = conf & (0x8 << 4);
+ else
+ entry->tvconf.has_component_output = false;
+
+ break;
+ }
+ case OUTPUT_DP:
+ entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
+ switch ((conf & 0x0f000000) >> 24) {
+ case 0xf:
+ entry->dpconf.link_nr = 4;
+ break;
+ case 0x3:
+ entry->dpconf.link_nr = 2;
+ break;
+ default:
+ entry->dpconf.link_nr = 1;
+ break;
+ }
+ break;
+ case OUTPUT_TMDS:
+ entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ break;
+ case 0xe:
+ /* weird g80 mobile type that "nv" treats as a terminator */
+ bdcb->dcb.entries--;
+ return false;
+ }
+
+ /* unsure what DCB version introduces this, 3.0? */
+ if (conf & 0x100000)
+ entry->i2c_upper_default = true;
+
+ return true;
+}
+
+static bool
+parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+ uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+{
+ if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
+ conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
+ conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
+ conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
+ conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
+ conn != 0xf2205004 && conn != 0xf2209004) {
+ NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
+
+ /* cause output setting to fail for !TV, so message is seen */
+ if ((conn & 0xf) != 0x1)
+ dcb->entries = 0;
+
+ return false;
+ }
+ /* most of the below is a "best guess" atm */
+ entry->type = conn & 0xf;
+ if (entry->type == 2)
+ /* another way of specifying straps based lvds... */
+ entry->type = OUTPUT_LVDS;
+ if (entry->type == 4) { /* digital */
+ if (conn & 0x10)
+ entry->type = OUTPUT_LVDS;
+ else
+ entry->type = OUTPUT_TMDS;
+ }
+ /* what's in bits 5-13? could be some encoder maker thing, in tv case */
+ entry->i2c_index = (conn >> 14) & 0xf;
+ /* raw heads field is in range 0-1, so move to 1-2 */
+ entry->heads = ((conn >> 18) & 0x7) + 1;
+ entry->location = (conn >> 21) & 0xf;
+ /* unused: entry->bus = (conn >> 25) & 0x7; */
+ /* set or to be same as heads -- hopefully safe enough */
+ entry->or = entry->heads;
+ entry->duallink_possible = false;
+
+ switch (entry->type) {
+ case OUTPUT_ANALOG:
+ entry->crtconf.maxfreq = (conf & 0xffff) * 10;
+ break;
+ case OUTPUT_LVDS:
+ /*
+ * This is probably buried in conn's unknown bits.
+ * This will upset EDID-ful models, if they exist
+ */
+ entry->lvdsconf.use_straps_for_mode = true;
+ entry->lvdsconf.use_power_scripts = true;
+ break;
+ case OUTPUT_TMDS:
+ /*
+ * Invent a DVI-A output, by copying the fields of the DVI-D
+ * output; reported to work by math_b on an NV20(!).
+ */
+ fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
+ break;
+ case OUTPUT_TV:
+ entry->tvconf.has_component_output = false;
+ break;
+ }
+
+ return true;
+}
+
+static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+ uint32_t conn, uint32_t conf)
+{
+ struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+ bool ret;
+
+ if (bdcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+ else
+ ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+ if (!ret)
+ return ret;
+
+ read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
+ entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+
+ return true;
+}
+
+static
+void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+{
+ /*
+ * DCB v2.0 lists each output combination separately.
+ * Here we merge compatible entries to have fewer outputs, with
+ * more options
+ */
+
+ int i, newentries = 0;
+
+ for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *ient = &dcb->entry[i];
+ int j;
+
+ for (j = i + 1; j < dcb->entries; j++) {
+ struct dcb_entry *jent = &dcb->entry[j];
+
+ if (jent->type == 100) /* already merged entry */
+ continue;
+
+ /* merge heads field when all other fields the same */
+ if (jent->i2c_index == ient->i2c_index &&
+ jent->type == ient->type &&
+ jent->location == ient->location &&
+ jent->or == ient->or) {
+ NV_TRACE(dev, "Merging DCB entries %d and %d\n",
+ i, j);
+ ient->heads |= jent->heads;
+ jent->type = 100; /* dummy value */
+ }
+ }
+ }
+
+ /* Compact entries merged into others out of dcb */
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].type == 100)
+ continue;
+
+ if (newentries != i) {
+ dcb->entry[newentries] = dcb->entry[i];
+ dcb->entry[newentries].index = newentries;
+ }
+ newentries++;
+ }
+
+ dcb->entries = newentries;
+}
+
+static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+{
+ struct bios_parsed_dcb *bdcb = &bios->bdcb;
+ struct parsed_dcb *dcb;
+ uint16_t dcbptr, i2ctabptr = 0;
+ uint8_t *dcbtable;
+ uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
+ bool configblock = true;
+ int recordlength = 8, confofs = 4;
+ int i;
+
+ dcb = bios->pub.dcb = &bdcb->dcb;
+ dcb->entries = 0;
+
+ /* get the offset from 0x36 */
+ dcbptr = ROM16(bios->data[0x36]);
+
+ if (dcbptr == 0x0) {
+ NV_WARN(dev, "No output data (DCB) found in BIOS, "
+ "assuming a CRT output exists\n");
+ /* this situation likely means a really old card, pre DCB */
+ fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
+
+ if (nv04_tv_identify(dev,
+ bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_tv_output(dcb, twoHeads);
+
+ return 0;
+ }
+
+ dcbtable = &bios->data[dcbptr];
+
+ /* get DCB version */
+ bdcb->version = dcbtable[0];
+ NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
+ bdcb->version >> 4, bdcb->version & 0xf);
+
+ if (bdcb->version >= 0x20) { /* NV17+ */
+ uint32_t sig;
+
+ if (bdcb->version >= 0x30) { /* NV40+ */
+ headerlen = dcbtable[1];
+ entries = dcbtable[2];
+ recordlength = dcbtable[3];
+ i2ctabptr = ROM16(dcbtable[4]);
+ sig = ROM32(dcbtable[6]);
+ bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
+ bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+ } else {
+ i2ctabptr = ROM16(dcbtable[2]);
+ sig = ROM32(dcbtable[4]);
+ headerlen = 8;
+ }
+
+ if (sig != 0x4edcbdcb) {
+ NV_ERROR(dev, "Bad Display Configuration Block "
+ "signature (%08X)\n", sig);
+ return -EINVAL;
+ }
+ } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+ char sig[8] = { 0 };
+
+ strncpy(sig, (char *)&dcbtable[-7], 7);
+ i2ctabptr = ROM16(dcbtable[2]);
+ recordlength = 10;
+ confofs = 6;
+
+ if (strcmp(sig, "DEV_REC")) {
+ NV_ERROR(dev, "Bad Display Configuration Block "
+ "signature (%s)\n", sig);
+ return -EINVAL;
+ }
+ } else {
+ /*
+ * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
+ * has the same single (crt) entry, even when tv-out present, so
+ * the conclusion is this version cannot really be used.
+ * v1.2 tables (some NV6/10, and NV15+) normally have the same
+ * 5 entries, which are not specific to the card and so no use.
+ * v1.2 does have an I2C table that read_dcb_i2c_table can
+ * handle, but cards exist (nv11 in #14821) with a bad i2c table
+ * pointer, so use the indices parsed in parse_bmp_structure.
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ */
+ NV_TRACEWARN(dev, "No useful information in BIOS output table; "
+ "adding all possible outputs\n");
+ fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
+
+ /*
+ * Attempt to detect TV before DVI because the test
+ * for the former is more accurate and it rules the
+ * latter out.
+ */
+ if (nv04_tv_identify(dev,
+ bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_tv_output(dcb, twoHeads);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dvi_i_output(dcb, twoHeads);
+
+ return 0;
+ }
+
+ if (!i2ctabptr)
+ NV_WARN(dev, "No pointer to DCB I2C port table\n");
+ else {
+ bdcb->i2c_table = &bios->data[i2ctabptr];
+ if (bdcb->version >= 0x30)
+ bdcb->i2c_default_indices = bdcb->i2c_table[4];
+ }
+
+ parse_dcb_gpio_table(bios);
+ parse_dcb_connector_table(bios);
+
+ if (entries > DCB_MAX_NUM_ENTRIES)
+ entries = DCB_MAX_NUM_ENTRIES;
+
+ for (i = 0; i < entries; i++) {
+ uint32_t connection, config = 0;
+
+ connection = ROM32(dcbtable[headerlen + recordlength * i]);
+ if (configblock)
+ config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
+
+ /* seen on an NV11 with DCB v1.5 */
+ if (connection == 0x00000000)
+ break;
+
+ /* seen on an NV17 with DCB v2.0 */
+ if (connection == 0xffffffff)
+ break;
+
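+		/* type 0xf marks an unused entry, skip it */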
+ if ((connection & 0x0000000f) == 0x0000000f)
+ continue;
+
+ NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
+ dcb->entries, connection, config);
+
+ if (!parse_dcb_entry(dev, bdcb, connection, config))
+ break;
+ }
+
+ /*
+	 * apart from v2.1+ not being known to require merging, this
+ * guarantees dcbent->index is the index of the entry in the rom image
+ */
+ if (bdcb->version < 0x21)
+ merge_like_dcb_entries(dev, dcb);
+
+ return dcb->entries ? 0 : -ENXIO;
+}
+
+static void
+fixup_legacy_connector(struct nvbios *bios)
+{
+ struct bios_parsed_dcb *bdcb = &bios->bdcb;
+ struct parsed_dcb *dcb = &bdcb->dcb;
+ int high = 0, i;
+
+ /*
+ * DCB 3.0 also has the table in most cases, but there are some cards
+	 * where the table is filled with stub entries, and the DCB entry
+ * indices are all 0. We don't need the connector indices on pre-G80
+ * chips (yet?) so limit the use to DCB 4.0 and above.
+ */
+ if (bdcb->version >= 0x40)
+ return;
+
+ /*
+	 * No known connector info before v3.0, so make it up. The rule here
+	 * is: anything on the same i2c bus is considered to be on the same
+	 * connector. Any output without an associated i2c bus is assigned
+	 * its own unique connector index.
+ */
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index == 0xf)
+ continue;
+
+ /*
+ * Ignore the I2C index for on-chip TV-out, as there
+ * are cards with bogus values (nv31m in bug 23212),
+ * and it's otherwise useless.
+ */
+ if (dcb->entry[i].type == OUTPUT_TV &&
+ dcb->entry[i].location == DCB_LOC_ON_CHIP) {
+ dcb->entry[i].i2c_index = 0xf;
+ continue;
+ }
+
+ dcb->entry[i].connector = dcb->entry[i].i2c_index;
+ if (dcb->entry[i].connector > high)
+ high = dcb->entry[i].connector;
+ }
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index != 0xf)
+ continue;
+
+ dcb->entry[i].connector = ++high;
+ }
+}
+
+static void
+fixup_legacy_i2c(struct nvbios *bios)
+{
+ struct parsed_dcb *dcb = &bios->bdcb.dcb;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
+ }
+}
+
+static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
+{
+ /*
+ * The header following the "HWSQ" signature has the number of entries,
+ * and the entry size
+ *
+ * An entry consists of a dword to write to the sequencer control reg
+ * (0x00001304), followed by the ucode bytes, written sequentially,
+ * starting at reg 0x00001400
+ */
+
+ uint8_t bytes_to_write;
+ uint16_t hwsq_entry_offset;
+ int i;
+
+ if (bios->data[hwsq_offset] <= entry) {
+ NV_ERROR(dev, "Too few entries in HW sequencer table for "
+ "requested entry\n");
+ return -ENOENT;
+ }
+
+ bytes_to_write = bios->data[hwsq_offset + 1];
+
+ if (bytes_to_write != 36) {
+ NV_ERROR(dev, "Unknown HW sequencer entry size\n");
+ return -EINVAL;
+ }
+
+ NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
+
+ hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
+
+ /* set sequencer control */
+ bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+ bytes_to_write -= 4;
+
+ /* write ucode */
+ for (i = 0; i < bytes_to_write; i += 4)
+ bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+
+ /* twiddle NV_PBUS_DEBUG_4 */
+ bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
+
+ return 0;
+}
+
+static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
+ struct nvbios *bios)
+{
+ /*
+	 * BMP based cards, from NV17, need microcode loaded to correctly
+	 * control the GPIO etc. for LVDS panels
+ *
+ * BIT based cards seem to do this directly in the init scripts
+ *
+ * The microcode entries are found by the "HWSQ" signature.
+ */
+
+ const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
+ const int sz = sizeof(hwsq_signature);
+ int hwsq_offset;
+
+ hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
+ if (!hwsq_offset)
+ return 0;
+
+ /* always use entry 0? */
+ return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
+}
+
+uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ const uint8_t edid_sig[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+ uint16_t offset = 0;
+ uint16_t newoffset;
+ int searchlen = NV_PROM_SIZE;
+
+ if (bios->fp.edid)
+ return bios->fp.edid;
+
+ while (searchlen) {
+ newoffset = findstr(&bios->data[offset], searchlen,
+ edid_sig, 8);
+ if (!newoffset)
+ return NULL;
+ offset += newoffset;
+ if (!nv_cksum(&bios->data[offset], EDID1_LEN))
+ break;
+
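+		/* checksum failed: step past this match and keep searching */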
+		searchlen -= (newoffset + 1);
+		offset++;
+ }
+
+ NV_TRACE(dev, "Found EDID in BIOS\n");
+
+ return bios->fp.edid = &bios->data[offset];
+}
+
+void
+nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
+ struct dcb_entry *dcbent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct init_exec iexec = { true, false };
+
+ bios->display.output = dcbent;
+ parse_init_table(bios, table, &iexec);
+ bios->display.output = NULL;
+}
+
+static bool NVInitVBIOS(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+ memset(bios, 0, sizeof(struct nvbios));
+ bios->dev = dev;
+
+ if (!NVShadowVBIOS(dev, bios->data))
+ return false;
+
+ bios->length = NV_PROM_SIZE;
+ return true;
+}
+
+static int nouveau_parse_vbios_struct(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
+ const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
+ int offset;
+
+ offset = findstr(bios->data, bios->length,
+ bit_signature, sizeof(bit_signature));
+ if (offset) {
+ NV_TRACE(dev, "BIT BIOS found\n");
+ return parse_bit_structure(bios, offset + 6);
+ }
+
+ offset = findstr(bios->data, bios->length,
+ bmp_signature, sizeof(bmp_signature));
+ if (offset) {
+ NV_TRACE(dev, "BMP BIOS found\n");
+ return parse_bmp_structure(dev, bios, offset);
+ }
+
+ NV_ERROR(dev, "No known BIOS signature found\n");
+ return -ENODEV;
+}
+
+int
+nouveau_run_vbios_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int i, ret = 0;
+
+ NVLockVgaCrtcs(dev, false);
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, bios->state.crtchead);
+
+ if (bios->major_version < 5) /* BMP only */
+ load_nv17_hw_sequencer_ucode(dev, bios);
+
+ if (bios->execute) {
+ bios->fp.last_script_invoc = 0;
+ bios->fp.lvds_init_run = false;
+ }
+
+ parse_init_tables(bios);
+
+ /*
+ * Runs some additional script seen on G8x VBIOSen. The VBIOS'
+	 * parser will run this right after the init tables; the binary
+	 * driver appears to run it at some point later.
+ */
+ if (bios->some_script_ptr) {
+ struct init_exec iexec = {true, false};
+
+ NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
+ bios->some_script_ptr);
+ parse_init_table(bios, bios->some_script_ptr, &iexec);
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ for (i = 0; i < bios->bdcb.dcb.entries; i++) {
+ nouveau_bios_run_display_table(dev,
+ &bios->bdcb.dcb.entry[i],
+ 0, 0);
+ }
+ }
+
+ NVLockVgaCrtcs(dev, true);
+
+ return ret;
+}
+
+static void
+nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct dcb_i2c_entry *entry;
+ int i;
+
+ entry = &bios->bdcb.dcb.i2c[0];
+ for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
+ nouveau_i2c_fini(dev, entry);
+}
+
+int
+nouveau_bios_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint32_t saved_nv_pextdev_boot_0;
+ bool was_locked;
+ int ret;
+
+ dev_priv->vbios = &bios->pub;
+
+ if (!NVInitVBIOS(dev))
+ return -ENODEV;
+
+ ret = nouveau_parse_vbios_struct(dev);
+ if (ret)
+ return ret;
+
+ ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+ if (ret)
+ return ret;
+
+ fixup_legacy_i2c(bios);
+ fixup_legacy_connector(bios);
+
+ if (!bios->major_version) /* we don't run version 0 bios */
+ return 0;
+
+ /* these will need remembering across a suspend */
+ saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
+ bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
+
+ /* init script execution disabled */
+ bios->execute = false;
+
+ /* ... unless card isn't POSTed already */
+ if (dev_priv->card_type >= NV_10 &&
+ NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+ NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
+ NV_INFO(dev, "Adaptor not initialised\n");
+ if (dev_priv->card_type < NV_50) {
+ NV_ERROR(dev, "Unable to POST this chipset\n");
+ return -ENODEV;
+ }
+
+ NV_INFO(dev, "Running VBIOS init tables\n");
+ bios->execute = true;
+ }
+
+ bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
+
+ ret = nouveau_run_vbios_init(dev);
+ if (ret) {
+ dev_priv->vbios = NULL;
+ return ret;
+ }
+
+ /* feature_byte on BMP is poor, but init always sets CR4B */
+ was_locked = NVLockVgaCrtcs(dev, false);
+ if (bios->major_version < 5)
+ bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
+
+ /* all BIT systems need p_f_m_t for digital_min_front_porch */
+ if (bios->is_mobile || bios->major_version >= 5)
+ ret = parse_fp_mode_table(dev, bios);
+ NVLockVgaCrtcs(dev, was_locked);
+
+ /* allow subsequent scripts to execute */
+ bios->execute = true;
+
+ return 0;
+}
+
+void
+nouveau_bios_takedown(struct drm_device *dev)
+{
+ nouveau_bios_i2c_devices_takedown(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 00000000000..1d5f10bd78e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2007-2008 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_BIOS_H__
+#define __NOUVEAU_BIOS_H__
+
+#include "nvreg.h"
+#include "nouveau_i2c.h"
+
+#define DCB_MAX_NUM_ENTRIES 16
+#define DCB_MAX_NUM_I2C_ENTRIES 16
+#define DCB_MAX_NUM_GPIO_ENTRIES 32
+#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
+
+#define DCB_LOC_ON_CHIP 0
+
+struct dcb_entry {
+ int index; /* may not be raw dcb index if merging has happened */
+ uint8_t type;
+ uint8_t i2c_index;
+ uint8_t heads;
+ uint8_t connector;
+ uint8_t bus;
+ uint8_t location;
+ uint8_t or;
+ bool duallink_possible;
+ union {
+ struct sor_conf {
+ int link;
+ } sorconf;
+ struct {
+ int maxfreq;
+ } crtconf;
+ struct {
+ struct sor_conf sor;
+ bool use_straps_for_mode;
+ bool use_power_scripts;
+ } lvdsconf;
+ struct {
+ bool has_component_output;
+ } tvconf;
+ struct {
+ struct sor_conf sor;
+ int link_nr;
+ int link_bw;
+ } dpconf;
+ struct {
+ struct sor_conf sor;
+ } tmdsconf;
+ };
+ bool i2c_upper_default;
+};
+
+struct dcb_i2c_entry {
+ uint8_t port_type;
+ uint8_t read, write;
+ struct nouveau_i2c_chan *chan;
+};
+
+struct parsed_dcb {
+ int entries;
+ struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
+ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
+};
+
+enum dcb_gpio_tag {
+ DCB_GPIO_TVDAC0 = 0xc,
+ DCB_GPIO_TVDAC1 = 0x2d,
+};
+
+struct dcb_gpio_entry {
+ enum dcb_gpio_tag tag;
+ int line;
+ bool invert;
+};
+
+struct parsed_dcb_gpio {
+ int entries;
+ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+};
+
+struct dcb_connector_table_entry {
+ uint32_t entry;
+ uint8_t type;
+ uint8_t index;
+ uint8_t gpio_tag;
+};
+
+struct dcb_connector_table {
+ int entries;
+ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
+struct bios_parsed_dcb {
+ uint8_t version;
+
+ struct parsed_dcb dcb;
+
+ uint8_t *i2c_table;
+ uint8_t i2c_default_indices;
+
+ uint16_t gpio_table_ptr;
+ struct parsed_dcb_gpio gpio;
+ uint16_t connector_table_ptr;
+ struct dcb_connector_table connector;
+};
+
+enum nouveau_encoder_type {
+ OUTPUT_ANALOG = 0,
+ OUTPUT_TV = 1,
+ OUTPUT_TMDS = 2,
+ OUTPUT_LVDS = 3,
+ OUTPUT_DP = 6,
+ OUTPUT_ANY = -1
+};
+
+enum nouveau_or {
+ OUTPUT_A = (1 << 0),
+ OUTPUT_B = (1 << 1),
+ OUTPUT_C = (1 << 2)
+};
+
+enum LVDS_script {
+ /* Order *does* matter here */
+ LVDS_INIT = 1,
+ LVDS_RESET,
+ LVDS_BACKLIGHT_ON,
+ LVDS_BACKLIGHT_OFF,
+ LVDS_PANEL_ON,
+ LVDS_PANEL_OFF
+};
+
+/* changing these requires matching changes to reg tables in nv_get_clock */
+#define MAX_PLL_TYPES 4
+enum pll_types {
+ NVPLL,
+ MPLL,
+ VPLL1,
+ VPLL2
+};
+
+struct pll_lims {
+ struct {
+ int minfreq;
+ int maxfreq;
+ int min_inputfreq;
+ int max_inputfreq;
+
+ uint8_t min_m;
+ uint8_t max_m;
+ uint8_t min_n;
+ uint8_t max_n;
+ } vco1, vco2;
+
+ uint8_t max_log2p;
+ /*
+	 * For most pre-nv50 cards setting a log2P of 7 (the common max_log2p
+	 * value) is no different to 6 (at least for vplls), so allowing the MNP
+	 * calc to use 7 causes the generated clock to be out by a factor of 2.
+	 * However, max_log2p cannot be fixed up during parsing, as the
+	 * unmodified max_log2p value is still needed for setting mplls; hence
+	 * the additional max_usable_log2p member.
+ */
+ uint8_t max_usable_log2p;
+ uint8_t log2p_bias;
+
+ uint8_t min_p;
+ uint8_t max_p;
+
+ int refclk;
+};
+
+struct nouveau_bios_info {
+ struct parsed_dcb *dcb;
+
+ uint8_t chip_version;
+
+ uint32_t dactestval;
+ uint32_t tvdactestval;
+ uint8_t digital_min_front_porch;
+ bool fp_no_ddc;
+};
+
+struct nvbios {
+ struct drm_device *dev;
+ struct nouveau_bios_info pub;
+
+ uint8_t data[NV_PROM_SIZE];
+ unsigned int length;
+ bool execute;
+
+ uint8_t major_version;
+ uint8_t feature_byte;
+ bool is_mobile;
+
+ uint32_t fmaxvco, fminvco;
+
+ bool old_style_init;
+ uint16_t init_script_tbls_ptr;
+ uint16_t extra_init_script_tbl_ptr;
+ uint16_t macro_index_tbl_ptr;
+ uint16_t macro_tbl_ptr;
+ uint16_t condition_tbl_ptr;
+ uint16_t io_condition_tbl_ptr;
+ uint16_t io_flag_condition_tbl_ptr;
+ uint16_t init_function_tbl_ptr;
+
+ uint16_t pll_limit_tbl_ptr;
+ uint16_t ram_restrict_tbl_ptr;
+
+ uint16_t some_script_ptr; /* BIT I + 14 */
+ uint16_t init96_tbl_ptr; /* BIT I + 16 */
+
+ struct bios_parsed_dcb bdcb;
+
+ struct {
+ int crtchead;
+ /* these need remembering across suspend */
+ uint32_t saved_nv_pfb_cfg0;
+ } state;
+
+ struct {
+ struct dcb_entry *output;
+ uint16_t script_table_ptr;
+ uint16_t dp_table_ptr;
+ } display;
+
+ struct {
+ uint16_t fptablepointer; /* also used by tmds */
+ uint16_t fpxlatetableptr;
+ int xlatwidth;
+ uint16_t lvdsmanufacturerpointer;
+ uint16_t fpxlatemanufacturertableptr;
+ uint16_t mode_ptr;
+ uint16_t xlated_entry;
+ bool power_off_for_reset;
+ bool reset_after_pclk_change;
+ bool dual_link;
+ bool link_c_increment;
+ bool BITbit1;
+ bool if_is_24bit;
+ int duallink_transition_clk;
+ uint8_t strapless_is_24bit;
+ uint8_t *edid;
+
+ /* will need resetting after suspend */
+ int last_script_invoc;
+ bool lvds_init_run;
+ } fp;
+
+ struct {
+ uint16_t output0_script_ptr;
+ uint16_t output1_script_ptr;
+ } tmds;
+
+ struct {
+ uint16_t mem_init_tbl_ptr;
+ uint16_t sdr_seq_tbl_ptr;
+ uint16_t ddr_seq_tbl_ptr;
+
+ struct {
+ uint8_t crt, tv, panel;
+ } i2c_indices;
+
+ uint16_t lvds_single_a_script_ptr;
+ } legacy;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 00000000000..320a14bceb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,671 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static void
+nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ ttm_bo_kunmap(&nvbo->kmap);
+
+ if (unlikely(nvbo->gem))
+ DRM_ERROR("bo %p still attached to GEM object\n", bo);
+
+ spin_lock(&dev_priv->ttm.bo_list_lock);
+ list_del(&nvbo->head);
+ spin_unlock(&dev_priv->ttm.bo_list_lock);
+ kfree(nvbo);
+}
+
+int
+nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, bool no_vm, bool mappable,
+ struct nouveau_bo **pnvbo)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *nvbo;
+ int ret, n = 0;
+
+ nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
+ if (!nvbo)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&nvbo->head);
+ INIT_LIST_HEAD(&nvbo->entry);
+ nvbo->mappable = mappable;
+ nvbo->no_vm = no_vm;
+ nvbo->tile_mode = tile_mode;
+ nvbo->tile_flags = tile_flags;
+
+ /*
+ * Some of the tile_flags have a periodic structure of N*4096 bytes,
+	 * align to that as well as the page size. Overallocate memory to
+ * avoid corruption of other buffer objects.
+ */
+ switch (tile_flags) {
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7a00:
+ if (dev_priv->chipset >= 0xA0) {
+			/* This is based on high-end cards with a 448-bit
+			 * memory bus; it could be different elsewhere. */
+ size += 6 * 28672;
+ /* 8 * 28672 is the actual alignment requirement,
+ * but we must also align to page size. */
+ align = 2 * 8 * 28672;
+ } else if (dev_priv->chipset >= 0x90) {
+ size += 3 * 16384;
+			align = 12 * 16384;
+ } else {
+ size += 3 * 8192;
+ /* 12 * 8192 is the actual alignment requirement,
+ * but we must also align to page size. */
+ align = 2 * 12 * 8192;
+ }
+ break;
+ default:
+ break;
+ }
+
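+	/* For instance, tile_flags 0x7a00 on a pre-0x90 chipset adds
+	 * 3 * 8192 bytes of slack and requests a 2 * 12 * 8192 byte
+	 * (48 page) alignment once shifted below. */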
+ align >>= PAGE_SHIFT;
+
+ size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
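+	/* NV50 needs 64KiB size/alignment granularity for these buffers,
+	 * presumably matching the GPU's large page size. */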
+ if (dev_priv->card_type == NV_50) {
+ size = (size + 65535) & ~65535;
+ if (align < (65536 / PAGE_SIZE))
+ align = (65536 / PAGE_SIZE);
+ }
+
+ if (flags & TTM_PL_FLAG_VRAM)
+ nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
+ if (flags & TTM_PL_FLAG_TT)
+ nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
+ nvbo->placement.placement = nvbo->placements;
+ nvbo->placement.busy_placement = nvbo->placements;
+ nvbo->placement.num_placement = n;
+ nvbo->placement.num_busy_placement = n;
+
+ nvbo->channel = chan;
+ nouveau_bo_placement_set(nvbo, flags);
+ ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+ ttm_bo_type_device, &nvbo->placement, align, 0,
+ false, NULL, size, nouveau_bo_del_ttm);
+ nvbo->channel = NULL;
+ if (ret) {
+		/* TTM will call nouveau_bo_del_ttm() if it fails. */
+ return ret;
+ }
+
+ spin_lock(&dev_priv->ttm.bo_list_lock);
+ list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
+ spin_unlock(&dev_priv->ttm.bo_list_lock);
+ *pnvbo = nvbo;
+ return 0;
+}
+
+void
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+ int n = 0;
+
+ if (memtype & TTM_PL_FLAG_VRAM)
+ nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
+ if (memtype & TTM_PL_FLAG_TT)
+ nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ if (memtype & TTM_PL_FLAG_SYSTEM)
+ nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
+ nvbo->placement.placement = nvbo->placements;
+ nvbo->placement.busy_placement = nvbo->placements;
+ nvbo->placement.num_placement = n;
+ nvbo->placement.num_busy_placement = n;
+}
+
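+/* Pinning is refcounted and keeps a buffer resident at a fixed placement.
+ * Typical usage (sketch):
+ *
+ *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+ *	if (ret == 0) {
+ *		... program the GPU with the buffer's fixed offset ...
+ *		nouveau_bo_unpin(nvbo);
+ *	}
+ */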
+int
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret, i;
+
+ if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
+		NV_ERROR(dev_priv->dev,
+ "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+ 1 << bo->mem.mem_type, memtype);
+ return -EINVAL;
+ }
+
+ if (nvbo->pin_refcnt++)
+ return 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ goto out;
+
+ nouveau_bo_placement_set(nvbo, memtype);
+ for (i = 0; i < nvbo->placement.num_placement; i++)
+ nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ if (ret == 0) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ dev_priv->fb_aper_free -= bo->mem.size;
+ break;
+ case TTM_PL_TT:
+ dev_priv->gart_info.aper_free -= bo->mem.size;
+ break;
+ default:
+ break;
+ }
+ }
+ ttm_bo_unreserve(bo);
+out:
+ if (unlikely(ret))
+ nvbo->pin_refcnt--;
+ return ret;
+}
+
+int
+nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret, i;
+
+ if (--nvbo->pin_refcnt)
+ return 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nvbo->placement.num_placement; i++)
+ nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ if (ret == 0) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ dev_priv->fb_aper_free += bo->mem.size;
+ break;
+ case TTM_PL_TT:
+ dev_priv->gart_info.aper_free += bo->mem.size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+
+int
+nouveau_bo_map(struct nouveau_bo *nvbo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ ttm_bo_unreserve(&nvbo->bo);
+ return ret;
+}
+
+void
+nouveau_bo_unmap(struct nouveau_bo *nvbo)
+{
+ ttm_bo_kunmap(&nvbo->kmap);
+}
+
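+/* The accessors below require a prior nouveau_bo_map(); "index" counts in
+ * units of the access size (u16/u32), not bytes.  The kmap may point at
+ * I/O memory, hence the ioreadXX/iowriteXX paths. */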
+u16
+nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
+{
+ bool is_iomem;
+ u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ return ioread16_native((void __force __iomem *)mem);
+ else
+ return *mem;
+}
+
+void
+nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
+{
+ bool is_iomem;
+ u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ iowrite16_native(val, (void __force __iomem *)mem);
+ else
+ *mem = val;
+}
+
+u32
+nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
+{
+ bool is_iomem;
+ u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ return ioread32_native((void __force __iomem *)mem);
+ else
+ return *mem;
+}
+
+void
+nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
+{
+ bool is_iomem;
+ u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ iowrite32_native(val, (void __force __iomem *)mem);
+ else
+ *mem = val;
+}
+
+static struct ttm_backend *
+nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ return ttm_agp_backend_init(bdev, dev->agp->bridge);
+ case NOUVEAU_GART_SGDMA:
+ return nouveau_sgdma_init_ttm(dev);
+ default:
+ NV_ERROR(dev, "Unknown GART type %d\n",
+ dev_priv->gart_info.type);
+ break;
+ }
+
+ return NULL;
+}
+
+static int
+nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ /* We'll do this from user space. */
+ return 0;
+}
+
+static int
+nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+
+ man->io_addr = NULL;
+ man->io_offset = drm_get_resource_start(dev, 1);
+ man->io_size = drm_get_resource_len(dev, 1);
+ if (man->io_size > nouveau_mem_fb_amount(dev))
+ man->io_size = nouveau_mem_fb_amount(dev);
+
+ man->gpu_offset = dev_priv->vm_vram_base;
+ break;
+ case TTM_PL_TT:
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->available_caching = TTM_PL_FLAG_UNCACHED;
+ man->default_caching = TTM_PL_FLAG_UNCACHED;
+ break;
+ case NOUVEAU_GART_SGDMA:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_CMA;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown GART type: %d\n",
+ dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ man->io_offset = dev_priv->gart_info.aper_base;
+ man->io_size = dev_priv->gart_info.aper_size;
+ man->io_addr = NULL;
+ man->gpu_offset = dev_priv->vm_gart_base;
+ break;
+ default:
+ NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ switch (bo->mem.mem_type) {
+ default:
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
+ break;
+ }
+}
+
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * TTM_PL_{VRAM,TT} directly.
+ */
+static int
+nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
+ struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
+ evict, no_wait, new_mem);
+ nouveau_fence_unref((void *)&fence);
+ return ret;
+}
+
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+ struct ttm_mem_reg *mem)
+{
+ if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+ if (mem->mem_type == TTM_PL_TT)
+ return NvDmaGART;
+ return NvDmaVRAM;
+ }
+
+ if (mem->mem_type == TTM_PL_TT)
+ return chan->gart_handle;
+ return chan->vram_handle;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_channel *chan;
+ uint64_t src_offset, dst_offset;
+ uint32_t page_count;
+ int ret;
+
+ chan = nvbo->channel;
+ if (!chan || nvbo->tile_flags || nvbo->no_vm) {
+ chan = dev_priv->channel;
+ if (!chan)
+ return -EINVAL;
+ }
+
+ src_offset = old_mem->mm_node->start << PAGE_SHIFT;
+ dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (chan != dev_priv->channel) {
+ if (old_mem->mem_type == TTM_PL_TT)
+ src_offset += dev_priv->vm_gart_base;
+ else
+ src_offset += dev_priv->vm_vram_base;
+
+ if (new_mem->mem_type == TTM_PL_TT)
+ dst_offset += dev_priv->vm_gart_base;
+ else
+ dst_offset += dev_priv->vm_vram_base;
+ }
+
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
+ OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
+
+ if (dev_priv->card_type >= NV_50) {
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ OUT_RING(chan, 1);
+ }
+
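+	/* The M2MF object copies at most 2047 lines per submission; treat
+	 * each page as one PAGE_SIZE-wide line and loop over the buffer. */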
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ if (dev_priv->card_type >= NV_50) {
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ OUT_RING(chan, upper_32_bits(src_offset));
+ OUT_RING(chan, upper_32_bits(dst_offset));
+ }
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF,
+ NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+ OUT_RING(chan, lower_32_bits(src_offset));
+ OUT_RING(chan, lower_32_bits(dst_offset));
+ OUT_RING(chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING(chan, PAGE_SIZE); /* line_length */
+ OUT_RING(chan, line_count);
+ OUT_RING(chan, (1<<8)|(1<<0));
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ OUT_RING(chan, 0);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+}
+
+static int
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ struct ttm_placement placement;
+ struct ttm_mem_reg tmp_mem;
+ int ret;
+
+ placement.fpfn = placement.lpfn = 0;
+ placement.num_placement = placement.num_busy_placement = 1;
+ placement.placement = &placement_memtype;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ if (ret)
+ return ret;
+
+ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+ if (ret)
+ goto out;
+
+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out:
+ if (tmp_mem.mm_node) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ drm_mm_put_block(tmp_mem.mm_node);
+ spin_unlock(&bo->bdev->glob->lru_lock);
+ }
+
+ return ret;
+}
+
+static int
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ struct ttm_placement placement;
+ struct ttm_mem_reg tmp_mem;
+ int ret;
+
+ placement.fpfn = placement.lpfn = 0;
+ placement.num_placement = placement.num_busy_placement = 1;
+ placement.placement = &placement_memtype;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+out:
+ if (tmp_mem.mm_node) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ drm_mm_put_block(tmp_mem.mm_node);
+ spin_unlock(&bo->bdev->glob->lru_lock);
+ }
+
+ return ret;
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_device *dev = dev_priv->dev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
+ !nvbo->no_vm) {
+ uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+
+ ret = nv50_mem_vm_bind_linear(dev,
+ offset + dev_priv->vm_vram_base,
+ new_mem->size, nvbo->tile_flags,
+ offset);
+ if (ret)
+ return ret;
+ }
+
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
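+	/* Prefer a GPU M2MF copy; moves to/from system memory bounce through
+	 * a GART (TTM_PL_TT) placement so the GPU can reach both ends, and
+	 * anything that fails falls back to a CPU memcpy. */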
+ if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+ BUG_ON(bo->mem.mm_node != NULL);
+ bo->mem = *new_mem;
+ new_mem->mm_node = NULL;
+ return 0;
+ }
+
+ if (new_mem->mem_type == TTM_PL_SYSTEM) {
+ if (old_mem->mem_type == TTM_PL_SYSTEM)
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
+ if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else {
+ if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+struct ttm_bo_driver nouveau_bo_driver = {
+ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .invalidate_caches = nouveau_bo_invalidate_caches,
+ .init_mem_type = nouveau_bo_init_mem_type,
+ .evict_flags = nouveau_bo_evict_flags,
+ .move = nouveau_bo_move,
+ .verify_access = nouveau_bo_verify_access,
+ .sync_obj_signaled = nouveau_fence_signalled,
+ .sync_obj_wait = nouveau_fence_wait,
+ .sync_obj_flush = nouveau_fence_flush,
+ .sync_obj_unref = nouveau_fence_unref,
+ .sync_obj_ref = nouveau_fence_ref,
+};
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
new file mode 100644
index 00000000000..ee2b84504d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+/****************************************************************************\
+* *
+* The video arbitration routines calculate some "magic" numbers. Fixes *
+* the snow seen when accessing the framebuffer without it. *
+* It just works (I hope). *
+* *
+\****************************************************************************/
+
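+/*
+ * Roughly: the CRTC FIFO drains at pclk * bpp / 8 bytes per second and can
+ * only refill after a latency built from pclk/nvclk/mclk cycles plus DRAM
+ * page misses.  The low watermark (lwm) must cover that latency so scanout
+ * never underruns, while the burst size has to fit in the FIFO above it.
+ */
+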
+struct nv_fifo_info {
+ int lwm;
+ int burst;
+};
+
+struct nv_sim_state {
+ int pclk_khz;
+ int mclk_khz;
+ int nvclk_khz;
+ int bpp;
+ int mem_page_miss;
+ int mem_latency;
+ int memory_type;
+ int memory_width;
+ int two_heads;
+};
+
+static void
+nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+ int pagemiss, cas, width, bpp;
+ int nvclks, mclks, pclks, crtpagemiss;
+ int found, mclk_extra, mclk_loop, cbs, m1, p1;
+ int mclk_freq, pclk_freq, nvclk_freq;
+ int us_m, us_n, us_p, crtc_drain_rate;
+ int cpm_us, us_crt, clwm;
+
+ pclk_freq = arb->pclk_khz;
+ mclk_freq = arb->mclk_khz;
+ nvclk_freq = arb->nvclk_khz;
+ pagemiss = arb->mem_page_miss;
+ cas = arb->mem_latency;
+ width = arb->memory_width >> 6;
+ bpp = arb->bpp;
+ cbs = 128;
+
+ pclks = 2;
+ nvclks = 10;
+ mclks = 13 + cas;
+ mclk_extra = 3;
+ found = 0;
+
+ while (!found) {
+ found = 1;
+
+ mclk_loop = mclks + mclk_extra;
+ us_m = mclk_loop * 1000 * 1000 / mclk_freq;
+ us_n = nvclks * 1000 * 1000 / nvclk_freq;
+ us_p = nvclks * 1000 * 1000 / pclk_freq;
+
+ crtc_drain_rate = pclk_freq * bpp / 8;
+ crtpagemiss = 2;
+ crtpagemiss += 1;
+ cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
+ us_crt = cpm_us + us_m + us_n + us_p;
+ clwm = us_crt * crtc_drain_rate / (1000 * 1000);
+ clwm++;
+
+ m1 = clwm + cbs - 512;
+ p1 = m1 * pclk_freq / mclk_freq;
+ p1 = p1 * bpp / 8;
+ if ((p1 < m1 && m1 > 0) || clwm > 519) {
+ found = !mclk_extra;
+ mclk_extra--;
+ }
+ if (clwm < 384)
+ clwm = 384;
+
+ fifo->lwm = clwm;
+ fifo->burst = cbs;
+ }
+}
+
+static void
+nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+ int fill_rate, drain_rate;
+ int pclks, nvclks, mclks, xclks;
+ int pclk_freq, nvclk_freq, mclk_freq;
+ int fill_lat, extra_lat;
+ int max_burst_o, max_burst_l;
+ int fifo_len, min_lwm, max_lwm;
+ const int burst_lat = 80; /* Maximum allowable latency due
+ * to the CRTC FIFO burst. (ns) */
+
+ pclk_freq = arb->pclk_khz;
+ nvclk_freq = arb->nvclk_khz;
+ mclk_freq = arb->mclk_khz;
+
+ fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
+ drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
+
+ fifo_len = arb->two_heads ? 1536 : 1024; /* B */
+
+ /* Fixed FIFO refill latency. */
+
+ pclks = 4; /* lwm detect. */
+
+ nvclks = 3 /* lwm -> sync. */
+ + 2 /* fbi bus cycles (1 req + 1 busy) */
+ + 1 /* 2 edge sync. may be very close to edge so
+ * just put one. */
+ + 1 /* fbi_d_rdv_n */
+ + 1 /* Fbi_d_rdata */
+ + 1; /* crtfifo load */
+
+ mclks = 1 /* 2 edge sync. may be very close to edge so
+ * just put one. */
+ + 1 /* arb_hp_req */
+ + 5 /* tiling pipeline */
+ + 2 /* latency fifo */
+ + 2 /* memory request to fbio block */
+ + 7; /* data returned from fbio block */
+
+ /* Need to accumulate 256 bits for read */
+ mclks += (arb->memory_type == 0 ? 2 : 1)
+ * arb->memory_width / 32;
+
+ fill_lat = mclks * 1000 * 1000 / mclk_freq /* minimum mclk latency */
+ + nvclks * 1000 * 1000 / nvclk_freq /* nvclk latency */
+ + pclks * 1000 * 1000 / pclk_freq; /* pclk latency */
+
+ /* Conditional FIFO refill latency. */
+
+ xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
+ * the overlay. */
+ + 2 * arb->mem_page_miss /* Extra pagemiss latency. */
+ + (arb->bpp == 32 ? 8 : 4); /* Margin of error. */
+
+ extra_lat = xclks * 1000 * 1000 / mclk_freq;
+
+ if (arb->two_heads)
+ /* Account for another CRTC. */
+ extra_lat += fill_lat + extra_lat + burst_lat;
+
+ /* FIFO burst */
+
+ /* Max burst not leading to overflows. */
+ max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
+ * (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
+ fifo->burst = min(max_burst_o, 1024);
+
+ /* Max burst value with an acceptable latency. */
+ max_burst_l = burst_lat * fill_rate / (1000 * 1000);
+ fifo->burst = min(max_burst_l, fifo->burst);
+
+ fifo->burst = rounddown_pow_of_two(fifo->burst);
+
+ /* FIFO low watermark */
+
+ min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
+ max_lwm = fifo_len - fifo->burst
+ + fill_lat * drain_rate / (1000 * 1000)
+ + fifo->burst * drain_rate / fill_rate;
+
+ fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
+}
+
+static void
+nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
+ int *burst, int *lwm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv_fifo_info fifo_data;
+ struct nv_sim_state sim_data;
+ int MClk = nouveau_hw_get_clock(dev, MPLL);
+ int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+ uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
+
+ sim_data.pclk_khz = VClk;
+ sim_data.mclk_khz = MClk;
+ sim_data.nvclk_khz = NVClk;
+ sim_data.bpp = bpp;
+ sim_data.two_heads = nv_two_heads(dev);
+ if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ uint32_t type;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
+
+ sim_data.memory_type = (type >> 12) & 1;
+ sim_data.memory_width = 64;
+ sim_data.mem_latency = 3;
+ sim_data.mem_page_miss = 10;
+ } else {
+ sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
+ sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+ sim_data.mem_latency = cfg1 & 0xf;
+ sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
+ }
+
+ if (dev_priv->card_type == NV_04)
+ nv04_calc_arb(&fifo_data, &sim_data);
+ else
+ nv10_calc_arb(&fifo_data, &sim_data);
+
+ *burst = ilog2(fifo_data.burst >> 4);
+ *lwm = fifo_data.lwm >> 3;
+}
+
+static void
+nv30_update_arb(int *burst, int *lwm)
+{
+ unsigned int fifo_size, burst_size, graphics_lwm;
+
+ fifo_size = 2048;
+ burst_size = 512;
+ graphics_lwm = fifo_size - burst_size;
+
+ *burst = ilog2(burst_size >> 5);
+ *lwm = graphics_lwm >> 3;
+}
+
+void
+nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type < NV_30)
+ nv04_update_arb(dev, vclk, bpp, burst, lwm);
+ else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ *burst = 128;
+ *lwm = 0x0480;
+ } else
+ nv30_update_arb(burst, lwm);
+}
+
+static int
+getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+ struct nouveau_pll_vals *bestpv)
+{
+ /* Find M, N and P for a single stage PLL
+ *
+ * Note that some bioses (NV3x) have lookup tables of precomputed MNP
+ * values, but we're too lazy to use those atm
+ *
+ * "clk" parameter in kHz
+ * returns calculated clock
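+	 *
+	 * The VCO runs at crystal * N / M and is divided down by P (P is
+	 * 1 << log2P unless the BIOS supplies a linear max_p), so the
+	 * output is crystal * N / (M * P); the loops below exhaustively
+	 * search the allowed ranges for the closest match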
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int cv = dev_priv->vbios->chip_version;
+ int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
+ int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
+ int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
+ int minU = pll_lim->vco1.min_inputfreq;
+ int maxU = pll_lim->vco1.max_inputfreq;
+ int minP = pll_lim->max_p ? pll_lim->min_p : 0;
+ int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
+ int crystal = pll_lim->refclk;
+ int M, N, thisP, P;
+ int clkP, calcclk;
+ int delta, bestdelta = INT_MAX;
+ int bestclk = 0;
+
+ /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
+ /* possibly correlated with introduction of 27MHz crystal */
+ if (dev_priv->card_type < NV_50) {
+ if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+ if (clk > 250000)
+ maxM = 6;
+ if (clk > 340000)
+ maxM = 2;
+ } else if (cv < 0x40) {
+ if (clk > 150000)
+ maxM = 6;
+ if (clk > 200000)
+ maxM = 4;
+ if (clk > 340000)
+ maxM = 2;
+ }
+ }
+
+ P = pll_lim->max_p ? maxP : (1 << maxP);
+ if ((clk * P) < minvco) {
+		minvco = clk * P;
+ maxvco = minvco * 2;
+ }
+
+ if (clk + clk/200 > maxvco) /* +0.5% */
+ maxvco = clk + clk/200;
+
+ /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
+ for (thisP = minP; thisP <= maxP; thisP++) {
+ P = pll_lim->max_p ? thisP : (1 << thisP);
+ clkP = clk * P;
+
+ if (clkP < minvco)
+ continue;
+ if (clkP > maxvco)
+ return bestclk;
+
+ for (M = minM; M <= maxM; M++) {
+ if (crystal/M < minU)
+ return bestclk;
+ if (crystal/M > maxU)
+ continue;
+
+ /* add crystal/2 to round better */
+ N = (clkP * M + crystal/2) / crystal;
+
+ if (N < minN)
+ continue;
+ if (N > maxN)
+ break;
+
+ /* more rounding additions */
+ calcclk = ((N * crystal + P/2) / P + M/2) / M;
+ delta = abs(calcclk - clk);
+ /* we do an exhaustive search rather than terminating
+ * on an optimality condition...
+ */
+ if (delta < bestdelta) {
+ bestdelta = delta;
+ bestclk = calcclk;
+ bestpv->N1 = N;
+ bestpv->M1 = M;
+ bestpv->log2P = thisP;
+ if (delta == 0) /* except this one */
+ return bestclk;
+ }
+ }
+ }
+
+ return bestclk;
+}
+
+static int
+getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+ struct nouveau_pll_vals *bestpv)
+{
+ /* Find M, N and P for a two stage PLL
+ *
+ * Note that some bioses (NV30+) have lookup tables of precomputed MNP
+ * values, but we're too lazy to use those atm
+ *
+ * "clk" parameter in kHz
+ * returns calculated clock
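+	 *
+	 * The first VCO feeds the second, so the output is
+	 * crystal * (N1 / M1) * (N2 / M2) / (1 << log2P)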
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chip_version = dev_priv->vbios->chip_version;
+ int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
+ int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
+ int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
+ int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
+ int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
+ int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
+ int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
+ int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
+ int maxlog2P = pll_lim->max_usable_log2p;
+ int crystal = pll_lim->refclk;
+ bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
+ int M1, N1, M2, N2, log2P;
+ int clkP, calcclk1, calcclk2, calcclkout;
+ int delta, bestdelta = INT_MAX;
+ int bestclk = 0;
+
+ int vco2 = (maxvco2 - maxvco2/200) / 2;
+ for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
+ ;
+ clkP = clk << log2P;
+
+ if (maxvco2 < clk + clk/200) /* +0.5% */
+ maxvco2 = clk + clk/200;
+
+ for (M1 = minM1; M1 <= maxM1; M1++) {
+ if (crystal/M1 < minU1)
+ return bestclk;
+ if (crystal/M1 > maxU1)
+ continue;
+
+ for (N1 = minN1; N1 <= maxN1; N1++) {
+ calcclk1 = crystal * N1 / M1;
+ if (calcclk1 < minvco1)
+ continue;
+ if (calcclk1 > maxvco1)
+ break;
+
+ for (M2 = minM2; M2 <= maxM2; M2++) {
+ if (calcclk1/M2 < minU2)
+ break;
+ if (calcclk1/M2 > maxU2)
+ continue;
+
+ /* add calcclk1/2 to round better */
+ N2 = (clkP * M2 + calcclk1/2) / calcclk1;
+ if (N2 < minN2)
+ continue;
+ if (N2 > maxN2)
+ break;
+
+ if (!fixedgain2) {
+ if (chip_version < 0x60)
+ if (N2/M2 < 4 || N2/M2 > 10)
+ continue;
+
+ calcclk2 = calcclk1 * N2 / M2;
+ if (calcclk2 < minvco2)
+ break;
+ if (calcclk2 > maxvco2)
+ continue;
+ } else
+ calcclk2 = calcclk1;
+
+ calcclkout = calcclk2 >> log2P;
+ delta = abs(calcclkout - clk);
+ /* we do an exhaustive search rather than terminating
+ * on an optimality condition...
+ */
+ if (delta < bestdelta) {
+ bestdelta = delta;
+ bestclk = calcclkout;
+ bestpv->N1 = N1;
+ bestpv->M1 = M1;
+ bestpv->N2 = N2;
+ bestpv->M2 = M2;
+ bestpv->log2P = log2P;
+ if (delta == 0) /* except this one */
+ return bestclk;
+ }
+ }
+ }
+ }
+
+ return bestclk;
+}
+
+int
+nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+ struct nouveau_pll_vals *pv)
+{
+ int outclk;
+
+ if (!pll_lim->vco2.maxfreq)
+ outclk = getMNP_single(dev, pll_lim, clk, pv);
+ else
+ outclk = getMNP_double(dev, pll_lim, clk, pv);
+
+ if (!outclk)
+ NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
+
+ return outclk;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
new file mode 100644
index 00000000000..9aaa972f882
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2005-2006 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *pb = chan->pushbuf_bo;
+ struct nouveau_gpuobj *pushbuf = NULL;
+ uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+ int ret;
+
+ if (pb->bo.mem.mem_type == TTM_PL_TT) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RO, &pushbuf,
+ NULL);
+ chan->pushbuf_base = start;
+ } else
+ if (dev_priv->card_type != NV_04) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_VIDMEM, &pushbuf);
+ chan->pushbuf_base = start;
+ } else {
+		/* NV04 cmdbuf hack, from the original ddx; not sure of its
+ * exact reason for existing :) PCI access to cmdbuf in
+ * VRAM.
+ */
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ drm_get_resource_start(dev, 1),
+ dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI, &pushbuf);
+ chan->pushbuf_base = start;
+	}
+
+	if (ret) {
+		NV_ERROR(dev, "Error creating pushbuf ctxdma: %d\n", ret);
+		return ret;
+	}
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
+ if (pushbuf != dev_priv->gart_info.sg_ctxdma)
+ nouveau_gpuobj_del(dev, &pushbuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct nouveau_bo *
+nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
+{
+ struct nouveau_bo *pushbuf = NULL;
+ int location, ret;
+
+ if (nouveau_vram_pushbuf)
+ location = TTM_PL_FLAG_VRAM;
+ else
+ location = TTM_PL_FLAG_TT;
+
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
+ true, &pushbuf);
+ if (ret) {
+ NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
+ return NULL;
+ }
+
+ ret = nouveau_bo_pin(pushbuf, location);
+ if (ret) {
+ NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
+ }
+
+ return pushbuf;
+}
+
+/* allocates and initializes a fifo for user space consumption */
+int
+nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ struct drm_file *file_priv,
+ uint32_t vram_handle, uint32_t tt_handle)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ int channel, user;
+ int ret;
+
+ /*
+	 * Alright, here is the full story:
+	 * NVIDIA cards have multiple hw fifo contexts (praise them for that,
+	 * no complicated crash-prone context switches).
+	 * We allocate a new context for each app and let it write to it
+	 * directly (woo, full userspace command submission!).
+	 * When there are no more contexts, you lose.
+ */
+ for (channel = 0; channel < pfifo->channels; channel++) {
+ if (dev_priv->fifos[channel] == NULL)
+ break;
+ }
+
+	/* no more fifos: you lose */
+ if (channel == pfifo->channels)
+ return -EINVAL;
+
+ dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
+ GFP_KERNEL);
+ if (!dev_priv->fifos[channel])
+ return -ENOMEM;
+ dev_priv->fifo_alloc_count++;
+ chan = dev_priv->fifos[channel];
+ INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+ INIT_LIST_HEAD(&chan->fence.pending);
+ chan->dev = dev;
+ chan->id = channel;
+ chan->file_priv = file_priv;
+ chan->vram_handle = vram_handle;
+ chan->gart_handle = tt_handle;
+
+ NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+
+ /* Allocate DMA push buffer */
+ chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
+ if (!chan->pushbuf_bo) {
+ ret = -ENOMEM;
+ NV_ERROR(dev, "pushbuf %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Locate channel's user control regs */
+ if (dev_priv->card_type < NV_40)
+ user = NV03_USER(channel);
+ else
+ if (dev_priv->card_type < NV_50)
+ user = NV40_USER(channel);
+ else
+ user = NV50_USER(channel);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
+ PAGE_SIZE);
+ if (!chan->user) {
+ NV_ERROR(dev, "ioremap of regs failed.\n");
+ nouveau_channel_free(chan);
+ return -ENOMEM;
+ }
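+	/* DMA_PUT and DMA_GET live at fixed offsets within the per-channel
+	 * USER control area mapped above. */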
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
+
+ /* Allocate space for per-channel fixed notifier memory */
+ ret = nouveau_notifier_init_channel(chan);
+ if (ret) {
+ NV_ERROR(dev, "ntfy %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Setup channel's default objects */
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+ if (ret) {
+ NV_ERROR(dev, "gpuobj %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Create a dma object for the push buffer */
+ ret = nouveau_channel_pushbuf_ctxdma_init(chan);
+ if (ret) {
+ NV_ERROR(dev, "pbctxdma %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* disable the fifo caches */
+ pfifo->reassign(dev, false);
+
+ /* Create a graphics context for new channel */
+ ret = pgraph->create_context(chan);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+	/* Construct initial RAMFC for new channel */
+ ret = pfifo->create_context(chan);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ pfifo->reassign(dev, true);
+
+ ret = nouveau_dma_init(chan);
+ if (!ret)
+ ret = nouveau_fence_init(chan);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ nouveau_debugfs_channel_init(chan);
+
+ NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+ *chan_ret = chan;
+ return 0;
+}
+
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t caches;
+ int idle;
+
+ if (!chan) {
+ NV_ERROR(dev, "no channel...\n");
+ return 1;
+ }
+
+ caches = nv_rd32(dev, NV03_PFIFO_CACHES);
+ nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
+
+ if (engine->fifo.channel_id(dev) != chan->id) {
+ struct nouveau_gpuobj *ramfc =
+ chan->ramfc ? chan->ramfc->gpuobj : NULL;
+
+ if (!ramfc) {
+ NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
+ return 1;
+ }
+
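+		/* The channel isn't resident in CACHE1, so read its saved
+		 * DMA_PUT/DMA_GET back from the first two RAMFC words; the
+		 * pushbuf is idle when they match. */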
+ engine->instmem.prepare_access(dev, false);
+ if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
+ idle = 0;
+ else
+ idle = 1;
+ engine->instmem.finish_access(dev);
+ } else {
+ idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, caches);
+ return idle;
+}
+
+/* stops a fifo */
+void
+nouveau_channel_free(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ unsigned long flags;
+ int ret;
+
+ NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+
+ nouveau_debugfs_channel_fini(chan);
+
+ /* Give outstanding push buffers a chance to complete */
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ nouveau_fence_update(chan);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+ if (chan->fence.sequence != chan->fence.sequence_ack) {
+ struct nouveau_fence *fence = NULL;
+
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret == 0) {
+ ret = nouveau_fence_wait(fence, NULL, false, false);
+ nouveau_fence_unref((void *)&fence);
+ }
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ }
+
+ /* Ensure all outstanding fences are signaled. They should be if the
+ * above attempts at idling were OK, but if we failed this'll tell TTM
+ * we're done with the buffers.
+ */
+ nouveau_fence_fini(chan);
+
+ /* Ensure the channel is no longer active on the GPU */
+ pfifo->reassign(dev, false);
+
+ if (pgraph->channel(dev) == chan) {
+ pgraph->fifo_access(dev, false);
+ pgraph->unload_context(dev);
+ pgraph->fifo_access(dev, true);
+ }
+ pgraph->destroy_context(chan);
+
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+ pfifo->destroy_context(chan);
+
+ pfifo->reassign(dev, true);
+
+ /* Release the channel's resources */
+ nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+ if (chan->pushbuf_bo) {
+ nouveau_bo_unpin(chan->pushbuf_bo);
+ nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+ }
+ nouveau_gpuobj_channel_takedown(chan);
+ nouveau_notifier_takedown_channel(chan);
+ if (chan->user)
+ iounmap(chan->user);
+
+ dev_priv->fifos[chan->id] = NULL;
+ dev_priv->fifo_alloc_count--;
+ kfree(chan);
+}
+
+/* cleans up all the fifos from file_priv */
+void
+nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ int i;
+
+ NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
+ for (i = 0; i < engine->fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->file_priv == file_priv)
+ nouveau_channel_free(chan);
+ }
+}
+
+int
+nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
+ int channel)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ if (channel >= engine->fifo.channels)
+ return 0;
+ if (dev_priv->fifos[channel] == NULL)
+ return 0;
+
+ return (dev_priv->fifos[channel]->file_priv == file_priv);
+}
+
+/***********************************
+ * ioctls wrapping the functions
+ ***********************************/
+
+static int
+nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_channel_alloc *init = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (dev_priv->engine.graph.accel_blocked)
+ return -ENODEV;
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return -EINVAL;
+
+ ret = nouveau_channel_alloc(dev, &chan, file_priv,
+ init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle);
+ if (ret)
+ return ret;
+ init->channel = chan->id;
+
+ init->subchan[0].handle = NvM2MF;
+ if (dev_priv->card_type < NV_50)
+ init->subchan[0].grclass = 0x0039;
+ else
+ init->subchan[0].grclass = 0x5039;
+ init->nr_subchan = 1;
+
+ /* Named memory object area */
+ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+ &init->notifier_handle);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_channel_free *cfree = data;
+ struct nouveau_channel *chan;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+
+ nouveau_channel_free(chan);
+ return 0;
+}
+
+/***********************************
+ * finally, the ioctl table
+ ***********************************/
+
+struct drm_ioctl_desc nouveau_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
+};
+
+int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 00000000000..032cf098fa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_connector.h"
+#include "nouveau_hw.h"
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct nouveau_encoder *enc)
+{
+ return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
+}
+
+static struct nouveau_encoder *
+find_encoder_by_type(struct drm_connector *connector, int type)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_mode_object *obj;
+ int i, id;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ id = connector->encoder_ids[i];
+ if (!id)
+ break;
+
+ obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+ nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+ if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
+ return nv_encoder;
+ }
+
+ return NULL;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
+{
+ struct drm_device *dev = to_drm_encoder(encoder)->dev;
+ struct drm_connector *drm_connector;
+
+ list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
+ if (drm_connector->encoder == to_drm_encoder(encoder))
+ return nouveau_connector(drm_connector);
+ }
+
+ return NULL;
+}
+
+static void
+nouveau_connector_destroy(struct drm_connector *drm_connector)
+{
+	struct nouveau_connector *connector = nouveau_connector(drm_connector);
+
+	if (!connector)
+		return;
+
+	NV_DEBUG(connector->base.dev, "\n");
+
+ drm_sysfs_connector_remove(drm_connector);
+ drm_connector_cleanup(drm_connector);
+ kfree(drm_connector);
+}
+
+static void
+nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ return;
+
+ *flags = 0;
+ if (NVLockVgaCrtcs(dev_priv->dev, false))
+ *flags |= 1;
+ if (nv_heads_tied(dev_priv->dev))
+ *flags |= 2;
+
+ if (*flags & 2)
+ NVSetOwner(dev_priv->dev, 0); /* necessary? */
+}
+
+static void
+nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ return;
+
+ if (flags & 2)
+ NVSetOwner(dev_priv->dev, 4);
+ if (flags & 1)
+ NVLockVgaCrtcs(dev_priv->dev, true);
+}
+
+static struct nouveau_i2c_chan *
+nouveau_connector_ddc_detect(struct drm_connector *connector,
+ struct nouveau_encoder **pnv_encoder)
+{
+ struct drm_device *dev = connector->dev;
+ uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
+ int ret, flags, i;
+
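+	/* Minimal DDC probe: write a zero offset to the EDID EEPROM address
+	 * (0x50), then read one byte back; an encoder whose bus answers is
+	 * assumed to have a monitor attached. */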
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct nouveau_i2c_chan *i2c = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_mode_object *obj;
+ int id;
+
+ id = connector->encoder_ids[i];
+ if (!id)
+ break;
+
+ obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+ nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+ if (nv_encoder->dcb->i2c_index < 0xf)
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!i2c)
+ continue;
+
+ nouveau_connector_ddc_prepare(connector, &flags);
+ ret = i2c_transfer(&i2c->adapter, msgs, 2);
+ nouveau_connector_ddc_finish(connector, flags);
+
+ if (ret == 2) {
+ *pnv_encoder = nv_encoder;
+ return i2c;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+nouveau_connector_set_encoder(struct drm_connector *connector,
+ struct nouveau_encoder *nv_encoder)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct drm_device *dev = connector->dev;
+
+ if (nv_connector->detected_encoder == nv_encoder)
+ return;
+ nv_connector->detected_encoder = nv_encoder;
+
+ if (nv_encoder->dcb->type == OUTPUT_LVDS ||
+ nv_encoder->dcb->type == OUTPUT_TMDS) {
+ connector->doublescan_allowed = false;
+ connector->interlace_allowed = false;
+ } else {
+ connector->doublescan_allowed = true;
+ if (dev_priv->card_type == NV_20 ||
+ (dev_priv->card_type == NV_10 &&
+ (dev->pci_device & 0x0ff0) != 0x0100 &&
+ (dev->pci_device & 0x0ff0) != 0x0150))
+ /* HW is broken */
+ connector->interlace_allowed = false;
+ else
+ connector->interlace_allowed = true;
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dvi_i_subconnector_property,
+ nv_encoder->dcb->type == OUTPUT_TMDS ?
+ DRM_MODE_SUBCONNECTOR_DVID :
+ DRM_MODE_SUBCONNECTOR_DVIA);
+ }
+}
+
+static enum drm_connector_status
+nouveau_connector_detect(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = NULL;
+ struct nouveau_i2c_chan *i2c;
+ int type, flags;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ if (nv_encoder && nv_connector->native_mode) {
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
+ i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
+ if (i2c) {
+ nouveau_connector_ddc_prepare(connector, &flags);
+ nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+ nouveau_connector_ddc_finish(connector, flags);
+ drm_mode_connector_update_edid_property(connector,
+ nv_connector->edid);
+ if (!nv_connector->edid) {
+ NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_DP &&
+ !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
+ NV_ERROR(dev, "Detected %s, but failed init\n",
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
+
+		/* Override the encoder type for DVI-I based on whether the
+		 * EDID says the display is digital or analog; both use the
+		 * same i2c channel, so the value returned from ddc_detect
+ * isn't necessarily correct.
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
+ type = OUTPUT_TMDS;
+ else
+ type = OUTPUT_ANALOG;
+
+ nv_encoder = find_encoder_by_type(connector, type);
+ if (!nv_encoder) {
+ NV_ERROR(dev, "Detected %d encoder on %s, "
+ "but no object!\n", type,
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
+ }
+
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
+ if (!nv_encoder)
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
+ if (nv_encoder) {
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+ struct drm_encoder_helper_funcs *helper =
+ encoder->helper_private;
+
+ if (helper->detect(encoder, connector) ==
+ connector_status_connected) {
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+	}
+
+ return connector_status_disconnected;
+}
+
+static void
+nouveau_connector_force(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ int type;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (connector->force == DRM_FORCE_ON_DIGITAL)
+ type = OUTPUT_TMDS;
+ else
+ type = OUTPUT_ANALOG;
+ } else
+ type = OUTPUT_ANY;
+
+ nv_encoder = find_encoder_by_type(connector, type);
+ if (!nv_encoder) {
+ NV_ERROR(dev, "can't find encoder to force %s on!\n",
+ drm_get_connector_name(connector));
+ connector->status = connector_status_disconnected;
+ return;
+ }
+
+ nouveau_connector_set_encoder(connector, nv_encoder);
+}
+
+static int
+nouveau_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ /* Scaling mode */
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct nouveau_crtc *nv_crtc = NULL;
+ bool modeset = false;
+
+ switch (value) {
+ case DRM_MODE_SCALE_NONE:
+ case DRM_MODE_SCALE_FULLSCREEN:
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* LVDS always needs gpu scaling */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ value == DRM_MODE_SCALE_NONE)
+ return -EINVAL;
+
+ /* Changing between GPU and panel scaling requires a full
+ * modeset
+ */
+ if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
+ (value == DRM_MODE_SCALE_NONE))
+ modeset = true;
+ nv_connector->scaling_mode = value;
+
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+ if (!nv_crtc)
+ return 0;
+
+ if (modeset || !nv_crtc->set_scale) {
+ ret = drm_crtc_helper_set_mode(&nv_crtc->base,
+ &nv_crtc->base.mode,
+ nv_crtc->base.x,
+ nv_crtc->base.y, NULL);
+ if (!ret)
+ return -EINVAL;
+ } else {
+ ret = nv_crtc->set_scale(nv_crtc, value, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Dithering */
+ if (property == dev->mode_config.dithering_mode_property) {
+ struct nouveau_crtc *nv_crtc = NULL;
+
+ if (value == DRM_MODE_DITHERING_ON)
+ nv_connector->use_dithering = true;
+ else
+ nv_connector->use_dithering = false;
+
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
+ if (!nv_crtc || !nv_crtc->set_dither)
+ return 0;
+
+ return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
+ true);
+ }
+
+ if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
+ return get_slave_funcs(nv_encoder)->
+ set_property(to_drm_encoder(nv_encoder), connector, property, value);
+
+ return -EINVAL;
+}
+
+static struct drm_display_mode *
+nouveau_connector_native_mode(struct nouveau_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+	/* Use the preferred mode if there is one. */
+ list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ NV_DEBUG(dev, "native mode from preferred\n");
+ return drm_mode_duplicate(dev, mode);
+ }
+ }
+
+ /* Otherwise, take the resolution with the largest width, then height,
+ * then vertical refresh
+ */
+ list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
+ high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+struct moderec {
+ int hdisplay;
+ int vdisplay;
+};
+
+static struct moderec scaler_modes[] = {
+ { 1920, 1200 },
+ { 1920, 1080 },
+ { 1680, 1050 },
+ { 1600, 1200 },
+ { 1400, 1050 },
+ { 1280, 1024 },
+ { 1280, 960 },
+ { 1152, 864 },
+ { 1024, 768 },
+ { 800, 600 },
+ { 720, 400 },
+ { 640, 480 },
+ { 640, 400 },
+ { 640, 350 },
+ {}
+};
+
+static int
+nouveau_connector_scaler_modes_add(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_display_mode *native = nv_connector->native_mode, *m;
+ struct drm_device *dev = connector->dev;
+ struct moderec *mode = &scaler_modes[0];
+ int modes = 0;
+
+ if (!native)
+ return 0;
+
+ while (mode->hdisplay) {
+ if (mode->hdisplay <= native->hdisplay &&
+ mode->vdisplay <= native->vdisplay) {
+ m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
+ drm_mode_vrefresh(native), false,
+ false, false);
+			if (!m)
+				break; /* stop on allocation failure */
+
+ m->type |= DRM_MODE_TYPE_DRIVER;
+
+ drm_mode_probed_add(connector, m);
+ modes++;
+ }
+
+ mode++;
+ }
+
+ return modes;
+}
+
+static int
+nouveau_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ int ret = 0;
+
+ /* If we're not LVDS, destroy the previous native mode, the attached
+ * monitor could have changed.
+ */
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ nv_connector->native_mode) {
+ drm_mode_destroy(dev, nv_connector->native_mode);
+ nv_connector->native_mode = NULL;
+ }
+
+ if (nv_connector->edid)
+ ret = drm_add_edid_modes(connector, nv_connector->edid);
+
+	/* Find the native mode if this is a digital panel.  If we didn't
+	 * find any modes through DDC previously, add the native mode to
+	 * the list of modes.
+	 */
+ if (!nv_connector->native_mode)
+ nv_connector->native_mode =
+ nouveau_connector_native_mode(nv_connector);
+ if (ret == 0 && nv_connector->native_mode) {
+ struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			ret = 1;
+		}
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_TV)
+ ret = get_slave_funcs(nv_encoder)->
+ get_modes(to_drm_encoder(nv_encoder), connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ ret += nouveau_connector_scaler_modes_add(connector);
+
+ return ret;
+}
+
+static int
+nouveau_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ unsigned min_clock = 25000, max_clock = min_clock;
+ unsigned clock = mode->clock;
+
+ switch (nv_encoder->dcb->type) {
+ case OUTPUT_LVDS:
+ BUG_ON(!nv_connector->native_mode);
+ if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay)
+ return MODE_PANEL;
+
+ min_clock = 0;
+ max_clock = 400000;
+ break;
+ case OUTPUT_TMDS:
+ if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
+ (dev_priv->card_type < NV_50 &&
+ !nv_encoder->dcb->duallink_possible))
+ max_clock = 165000;
+ else
+ max_clock = 330000;
+ break;
+ case OUTPUT_ANALOG:
+ max_clock = nv_encoder->dcb->crtconf.maxfreq;
+ if (!max_clock)
+ max_clock = 350000;
+ break;
+ case OUTPUT_TV:
+ return get_slave_funcs(nv_encoder)->
+ mode_valid(to_drm_encoder(nv_encoder), mode);
+ case OUTPUT_DP:
+ if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
+ max_clock = nv_encoder->dp.link_nr * 270000;
+ else
+ max_clock = nv_encoder->dp.link_nr * 162000;
+
+		clock *= 3; /* bytes per pixel at 24bpp */
+ break;
+ }
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+nouveau_connector_best_encoder(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+ if (nv_connector->detected_encoder)
+ return to_drm_encoder(nv_connector->detected_encoder);
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs
+nouveau_connector_helper_funcs = {
+ .get_modes = nouveau_connector_get_modes,
+ .mode_valid = nouveau_connector_mode_valid,
+ .best_encoder = nouveau_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = NULL,
+ .restore = NULL,
+ .detect = nouveau_connector_detect,
+ .destroy = nouveau_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = nouveau_connector_set_property,
+ .force = nouveau_connector_force
+};
+
+static int
+nouveau_connector_create_lvds(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_display_mode native, *mode, *temp;
+ bool dummy, if_is_24bit = false;
+ int ret, flags;
+
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ if (!nv_encoder)
+ return -ENODEV;
+
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
+ if (ret) {
+ NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
+ return ret;
+ }
+ nv_connector->use_dithering = !if_is_24bit;
+
+	/* First, try getting an EDID over DDC, if allowed and an I2C
+	 * channel is available.
+	 */
+ if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+ if (i2c) {
+ nouveau_connector_ddc_prepare(connector, &flags);
+ nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+ nouveau_connector_ddc_finish(connector, flags);
+ }
+
+ /* If no EDID found above, and the VBIOS indicates a hardcoded
+	 * modeline is available for the panel, set it as the panel's
+ * native mode and exit.
+ */
+ if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
+ (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
+ dev_priv->VBIOS.pub.fp_no_ddc)) {
+ nv_connector->native_mode = drm_mode_duplicate(dev, &native);
+ goto out;
+ }
+
+	/* Still nothing; some VBIOS images have a hardcoded EDID block
+	 * for the panel stored in them.
+	 */
+ if (!nv_connector->edid && !nv_connector->native_mode &&
+ !dev_priv->VBIOS.pub.fp_no_ddc) {
+ nv_connector->edid =
+ (struct edid *)nouveau_bios_embedded_edid(dev);
+ }
+
+ if (!nv_connector->edid)
+ goto out;
+
+ /* We didn't find/use a panel mode from the VBIOS, so parse the EDID
+ * block and look for the preferred mode there.
+ */
+ ret = drm_add_edid_modes(connector, nv_connector->edid);
+ if (ret == 0)
+ goto out;
+ nv_connector->detected_encoder = nv_encoder;
+ nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
+ list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+out:
+ if (!nv_connector->native_mode) {
+ NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
+ "determine its native mode. Disabling.\n");
+ return -ENODEV;
+ }
+
+ drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ return 0;
+}
+
+int
+nouveau_connector_create(struct drm_device *dev, int index, int type)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = NULL;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+ if (!nv_connector)
+ return -ENOMEM;
+ nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
+ connector = &nv_connector->base;
+
+ switch (type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ NV_INFO(dev, "Detected a VGA connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ NV_INFO(dev, "Detected a DVI-D connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ NV_INFO(dev, "Detected a DVI-I connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+		NV_INFO(dev, "Detected an LVDS connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_TV:
+ NV_INFO(dev, "Detected a TV connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ NV_INFO(dev, "Detected a DisplayPort connector\n");
+ break;
+ default:
+ NV_ERROR(dev, "Unknown connector, this is not good.\n");
+ break;
+ }
+
+ /* defaults, will get overridden in detect() */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
+ drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
+ /* Init DVI-I specific properties */
+ if (type == DRM_MODE_CONNECTOR_DVII) {
+ drm_mode_create_dvi_i_properties(dev);
+ drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+ drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+ }
+
+ if (type != DRM_MODE_CONNECTOR_LVDS)
+ nv_connector->use_dithering = false;
+
+ if (type == DRM_MODE_CONNECTOR_DVID ||
+ type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_LVDS ||
+ type == DRM_MODE_CONNECTOR_DisplayPort) {
+ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
+
+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
+ nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
+ : DRM_MODE_DITHERING_OFF);
+
+ } else {
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+
+ if (type == DRM_MODE_CONNECTOR_VGA &&
+ dev_priv->card_type >= NV_50) {
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ }
+ }
+
+ /* attach encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->connector != index)
+ continue;
+
+ if (get_slave_funcs(nv_encoder))
+ get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
+
+ drm_sysfs_connector_add(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ ret = nouveau_connector_create_lvds(dev, connector);
+ if (ret) {
+ connector->funcs->destroy(connector);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 00000000000..728b8090e5f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CONNECTOR_H__
+#define __NOUVEAU_CONNECTOR_H__
+
+#include "drm_edid.h"
+#include "nouveau_i2c.h"
+
+struct nouveau_connector {
+ struct drm_connector base;
+
+ struct dcb_connector_table_entry *dcb;
+
+ int scaling_mode;
+ bool use_dithering;
+
+ struct nouveau_encoder *detected_encoder;
+ struct edid *edid;
+ struct drm_display_mode *native_mode;
+};
+
+static inline struct nouveau_connector *nouveau_connector(
+ struct drm_connector *con)
+{
+ return container_of(con, struct nouveau_connector, base);
+}
+
+int nouveau_connector_create(struct drm_device *dev, int index, int type);
+
+#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 00000000000..49fa7b2d257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CRTC_H__
+#define __NOUVEAU_CRTC_H__
+
+struct nouveau_crtc {
+ struct drm_crtc base;
+
+ int index;
+
+ struct drm_display_mode *mode;
+
+ uint32_t dpms_saved_fp_control;
+ uint32_t fp_users;
+ int saturation;
+ int sharpness;
+ int last_dpms;
+
+ struct {
+ int cpp;
+ bool blanked;
+ uint32_t offset;
+ uint32_t tile_flags;
+ } fb;
+
+ struct {
+ struct nouveau_bo *nvbo;
+ bool visible;
+ uint32_t offset;
+ void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
+ void (*set_pos)(struct nouveau_crtc *, int x, int y);
+ void (*hide)(struct nouveau_crtc *, bool update);
+ void (*show)(struct nouveau_crtc *, bool update);
+ } cursor;
+
+ struct {
+ struct nouveau_bo *nvbo;
+ uint16_t r[256];
+ uint16_t g[256];
+ uint16_t b[256];
+ int depth;
+ } lut;
+
+ int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
+ int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+};
+
+static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct nouveau_crtc, base);
+}
+
+static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
+{
+ return &crtc->base;
+}
+
+int nv50_crtc_create(struct drm_device *dev, int index);
+int nv50_cursor_init(struct nouveau_crtc *);
+void nv50_cursor_fini(struct nouveau_crtc *);
+int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
+ uint32_t buffer_handle, uint32_t width,
+ uint32_t height);
+int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
+
+int nv04_cursor_init(struct nouveau_crtc *);
+
+struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
+
+#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 00000000000..d79db3698f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+static int
+nouveau_debugfs_channel_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_channel *chan = node->info_ent->data;
+
+ seq_printf(m, "channel id : %d\n", chan->id);
+
+ seq_printf(m, "cpu fifo state:\n");
+ seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
+ seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
+ seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
+ seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
+ seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
+
+ seq_printf(m, "gpu fifo state:\n");
+ seq_printf(m, " get: 0x%08x\n",
+ nvchan_rd32(chan, chan->user_get));
+ seq_printf(m, " put: 0x%08x\n",
+ nvchan_rd32(chan, chan->user_put));
+
+ seq_printf(m, "last fence : %d\n", chan->fence.sequence);
+ seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
+ return 0;
+}
+
+int
+nouveau_debugfs_channel_init(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_minor *minor = chan->dev->primary;
+ int ret;
+
+ if (!dev_priv->debugfs.channel_root) {
+ dev_priv->debugfs.channel_root =
+ debugfs_create_dir("channel", minor->debugfs_root);
+ if (!dev_priv->debugfs.channel_root)
+ return -ENOENT;
+ }
+
+ snprintf(chan->debugfs.name, 32, "%d", chan->id);
+ chan->debugfs.info.name = chan->debugfs.name;
+ chan->debugfs.info.show = nouveau_debugfs_channel_info;
+ chan->debugfs.info.driver_features = 0;
+ chan->debugfs.info.data = chan;
+
+ ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
+ dev_priv->debugfs.channel_root,
+ chan->dev->primary);
+ if (ret == 0)
+ chan->debugfs.active = true;
+ return ret;
+}
+
+void
+nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+
+ if (!chan->debugfs.active)
+ return;
+
+ drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
+ chan->debugfs.active = false;
+
+ if (chan == dev_priv->channel) {
+ debugfs_remove(dev_priv->debugfs.channel_root);
+ dev_priv->debugfs.channel_root = NULL;
+ }
+}
+
+static int
+nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t ppci_0;
+
+ ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
+
+ seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
+ seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
+ ppci_0 & 0xffff, ppci_0 >> 16);
+ return 0;
+}
+
+static int
+nouveau_debugfs_memory_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+
+ seq_printf(m, "VRAM total: %dKiB\n",
+ (int)(nouveau_mem_fb_amount(dev) >> 10));
+ return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+ { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
+ { "memory", nouveau_debugfs_memory_info, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 00000000000..dfc94391d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+
+static void
+nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct drm_device *dev = drm_fb->dev;
+
+ if (drm_fb->fbdev)
+ nouveau_fbcon_remove(dev, drm_fb);
+
+ if (fb->nvbo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(fb->nvbo->gem);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ drm_framebuffer_cleanup(drm_fb);
+ kfree(fb);
+}
+
+static int
+nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+
+ return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+}
+
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+ .destroy = nouveau_user_framebuffer_destroy,
+ .create_handle = nouveau_user_framebuffer_create_handle,
+};
+
+struct drm_framebuffer *
+nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct nouveau_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!fb)
+ return NULL;
+
+ ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ if (ret) {
+ kfree(fb);
+ return NULL;
+ }
+
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+ fb->nvbo = nvbo;
+ return &fb->base;
+}
+
+static struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ if (!gem)
+ return NULL;
+
+ fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
+ if (!fb) {
+ drm_gem_object_unreference(gem);
+ return NULL;
+ }
+
+ return fb;
+}
+
+const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .fb_changed = nouveau_fbcon_probe,
+};
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 00000000000..703553687b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+int
+nouveau_dma_init(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *m2mf = NULL;
+ int ret, i;
+
+ /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
+ 0x0039 : 0x5039, &m2mf);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
+ if (ret)
+ return ret;
+
+ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+ if (ret)
+ return ret;
+
+ /* Map push buffer */
+ ret = nouveau_bo_map(chan->pushbuf_bo);
+ if (ret)
+ return ret;
+
+ /* Map M2MF notifier object - fbcon. */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = nouveau_bo_map(chan->notifier_bo);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialise DMA vars */
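+	/* All values are in dwords: max is the push buffer size less two
+	 * reserved dwords, cur is the CPU write pointer, put the position
+	 * last handed to the GPU, and free the space known to be available
+	 * before another dma_wait() is needed.
+	 */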
+ chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ chan->dma.put = 0;
+ chan->dma.cur = chan->dma.put;
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+
+ /* Insert NOPS for NOUVEAU_DMA_SKIPS */
+ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(chan, 0);
+
+ /* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
+ OUT_RING(chan, NvM2MF);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
+ OUT_RING(chan, NvNotify0);
+
+ /* Sit back and pray the channel works.. */
+ FIRE_RING(chan);
+
+ return 0;
+}
+
+void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
+{
+ bool is_iomem;
+ u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
+ mem = &mem[chan->dma.cur];
+ if (is_iomem)
+ memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
+ else
+ memcpy(mem, data, nr_dwords * 4);
+ chan->dma.cur += nr_dwords;
+}
+
+static inline bool
+READ_GET(struct nouveau_channel *chan, uint32_t *get)
+{
+ uint32_t val;
+
+ val = nvchan_rd32(chan, chan->user_get);
+ if (val < chan->pushbuf_base ||
+ val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
+		/* GET is outside the main push buffer; the value is of no
+		 * use to dma_wait() except for telling whether the GPU
+		 * has stalled or not
+		 */
+ *get = val;
+ return false;
+ }
+
+ *get = (val - chan->pushbuf_base) >> 2;
+ return true;
+}
+
+int
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
+{
+ uint32_t get, prev_get = 0, cnt = 0;
+ bool get_valid;
+
+ while (chan->dma.free < size) {
+		/* reset the counter as long as GET is still advancing, so
+		 * we avoid misdetecting a GPU lockup if the GPU happens to
+		 * just be processing an operation that takes a long time
+		 */
+ get_valid = READ_GET(chan, &get);
+ if (get != prev_get) {
+ prev_get = get;
+ cnt = 0;
+ }
+
+ if ((++cnt & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (cnt > 100000)
+ return -EBUSY;
+ }
+
+ /* loop until we have a usable GET pointer. the value
+ * we read from the GPU may be outside the main ring if
+ * PFIFO is processing a buffer called from the main ring,
+ * discard these values until something sensible is seen.
+ *
+ * the other case we discard GET is while the GPU is fetching
+ * from the SKIPS area, so the code below doesn't have to deal
+ * with some fun corner cases.
+ */
+ if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+ continue;
+
+ if (get <= chan->dma.cur) {
+ /* engine is fetching behind us, or is completely
+ * idle (GET == PUT) so we have free space up until
+ * the end of the push buffer
+ *
+ * we can only hit that path once per call due to
+ * looping back to the beginning of the push buffer,
+ * we'll hit the fetching-ahead-of-us path from that
+ * point on.
+ *
+ * the *one* exception to that rule is if we read
+ * GET==PUT, in which case the below conditional will
+ * always succeed and break us out of the wait loop.
+ */
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+ if (chan->dma.free >= size)
+ break;
+
+ /* not enough space left at the end of the push buffer,
+ * instruct the GPU to jump back to the start right
+ * after processing the currently pending commands.
+ */
+ OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+ WRITE_PUT(NOUVEAU_DMA_SKIPS);
+
+ /* we're now submitting commands at the start of
+ * the push buffer.
+ */
+ chan->dma.cur =
+ chan->dma.put = NOUVEAU_DMA_SKIPS;
+ }
+
+ /* engine fetching ahead of us, we have space up until the
+ * current GET pointer. the "- 1" is to ensure there's
+ * space left to emit a jump back to the beginning of the
+ * push buffer if we require it. we can never get GET == PUT
+ * here, so this is safe.
+ */
+ chan->dma.free = get - chan->dma.cur - 1;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 00000000000..04e85d8f757
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_DMA_H__
+#define __NOUVEAU_DMA_H__
+
+#ifndef NOUVEAU_DMA_DEBUG
+#define NOUVEAU_DMA_DEBUG 0
+#endif
+
+/*
+ * There's a hw race condition where you can't jump to your PUT offset,
+ * to avoid this we jump to offset + SKIPS and fill the difference with
+ * NOPs.
+ *
+ * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
+ * SKIPS value of 8.  Let's assume the race condition has to do with
+ * writing into the fetch area: since we configure a fetch size of 128
+ * bytes, we need a correspondingly larger SKIPS value.
+ */
+#define NOUVEAU_DMA_SKIPS (128 / 4)
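+
+/*
+ * Illustrative layout, assuming a 4KiB push buffer: dwords 0..31 are the
+ * SKIPS area (filled with NOPs by nouveau_dma_init()), commands start at
+ * dword 32, and the wrap back to the start is the jump command
+ * (pushbuf_base | 0x20000000) emitted by nouveau_dma_wait().
+ */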
+
+/* Hardcoded object assignments to subchannels (subchannel id). */
+enum {
+ NvSubM2MF = 0,
+ NvSub2D = 1,
+ NvSubCtxSurf2D = 1,
+ NvSubGdiRect = 2,
+ NvSubImageBlit = 3
+};
+
+/* Object handles. */
+enum {
+ NvM2MF = 0x80000001,
+ NvDmaFB = 0x80000002,
+ NvDmaTT = 0x80000003,
+ NvDmaVRAM = 0x80000004,
+ NvDmaGART = 0x80000005,
+ NvNotify0 = 0x80000006,
+ Nv2D = 0x80000007,
+ NvCtxSurf2D = 0x80000008,
+ NvRop = 0x80000009,
+ NvImagePatt = 0x8000000a,
+ NvClipRect = 0x8000000b,
+ NvGdiRect = 0x8000000c,
+ NvImageBlit = 0x8000000d,
+
+ /* G80+ display objects */
+ NvEvoVRAM = 0x01000000,
+ NvEvoFB16 = 0x01000001,
+ NvEvoFB32 = 0x01000002
+};
+
+#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
+#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
+
+#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
+
+static __must_check inline int
+RING_SPACE(struct nouveau_channel *chan, int size)
+{
+ if (chan->dma.free < size) {
+ int ret;
+
+ ret = nouveau_dma_wait(chan, size);
+ if (ret)
+ return ret;
+ }
+
+ chan->dma.free -= size;
+ return 0;
+}
+
+static inline void
+OUT_RING(struct nouveau_channel *chan, int data)
+{
+ if (NOUVEAU_DMA_DEBUG) {
+ NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
+ chan->id, chan->dma.cur << 2, data);
+ }
+
+ nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
+}
+
+extern void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
+
+static inline void
+BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+}
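+
+/* Example:
+ *	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
+ * emits the header (1 << 18) | (0 << 13) | 0x0180 = 0x00040180: method
+ * count in bits 28:18, subchannel in bits 15:13, method in the low bits.
+ */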
+
+#define WRITE_PUT(val) do { \
+ DRM_MEMORYBARRIER(); \
+ nouveau_bo_rd32(chan->pushbuf_bo, 0); \
+ nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
+} while (0)
+
+static inline void
+FIRE_RING(struct nouveau_channel *chan)
+{
+ if (NOUVEAU_DMA_DEBUG) {
+ NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
+ chan->id, chan->dma.cur << 2);
+ }
+
+ if (chan->dma.cur == chan->dma.put)
+ return;
+ chan->accel_done = true;
+
+ WRITE_PUT(chan->dma.cur);
+ chan->dma.put = chan->dma.cur;
+}
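+
+/* Typical submission sequence (illustrative sketch, error handling
+ * elided):
+ *
+ *	if (RING_SPACE(chan, 2) == 0) {
+ *		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ *		OUT_RING(chan, 0);
+ *		FIRE_RING(chan);
+ *	}
+ */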
+
+static inline void
+WIND_RING(struct nouveau_channel *chan)
+{
+ chan->dma.cur = chan->dma.put;
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 00000000000..de61f4640e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_encoder.h"
+
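+/* Native AUX channel transactions (as opposed to I2C-over-AUX): request
+ * command 8 is a native write and 9 a native read, which is what the
+ * auxch_wr()/auxch_rd() helpers below issue.
+ */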
+static int
+auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
+ return ret;
+}
+
+static int
+nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
+ NV50_SOR_DP_CTRL_LANE_MASK);
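+	/* lane count (1, 2 or 4) -> contiguous lane-enable mask at bit 16 */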
+ tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
+ if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+ return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ int reg = 0x614300 + (nv_encoder->or * 0x800);
+
+ tmp = nv_rd32(dev, reg);
+ tmp &= 0xfff3ffff;
+ if (cmd == DP_LINK_BW_2_7)
+ tmp |= 0x00040000;
+ nv_wr32(dev, reg, tmp);
+
+ return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ uint8_t cmd;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+ int ret;
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
+ tmp |= (pattern << 24);
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+ ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+ if (ret)
+ return ret;
+ cmd &= ~DP_TRAINING_PATTERN_MASK;
+ cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
+ return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int i, dpe_headerlen, max_vs = 0;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+	for (i = 0; i < dpe->record_nr; i++, dpse++) {
+ if (dpse->vs_level > max_vs)
+ max_vs = dpse->vs_level;
+ }
+
+ return max_vs;
+}
+
+static int
+nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int i, dpe_headerlen, max_pre = 0;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+	for (i = 0; i < dpe->record_nr; i++, dpse++) {
+ if (dpse->vs_level != vs)
+ continue;
+
+ if (dpse->pre_level > max_pre)
+ max_pre = dpse->pre_level;
+ }
+
+ return max_pre;
+}
+
+static bool
+nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int ret, i, dpe_headerlen, vs = 0, pre = 0;
+ uint8_t request[2];
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
+ if (ret)
+ return false;
+
+ NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
+
+ /* Keep all lanes at the same level.. */
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
+ int lane_vs = lane_req & 3;
+ int lane_pre = (lane_req >> 2) & 3;
+
+ if (lane_vs > vs)
+ vs = lane_vs;
+ if (lane_pre > pre)
+ pre = lane_pre;
+ }
+
+ if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
+ vs = nouveau_dp_max_voltage_swing(encoder);
+ vs |= 4;
+ }
+
+ if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
+ pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
+ pre |= 4;
+ }
+
+ /* Update the configuration for all lanes.. */
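+	/* The resulting byte matches the DPCD TRAINING_LANEx_SET layout:
+	 * voltage swing in bits 1:0 with "max swing reached" in bit 2,
+	 * pre-emphasis in bits 4:3 with "max pre-emphasis reached" in bit 5.
+	 */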
+ for (i = 0; i < nv_encoder->dp.link_nr; i++)
+ config[i] = (pre << 3) | vs;
+
+ return true;
+}
+
+static bool
+nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+ int dpe_headerlen, ret, i;
+
+ NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ config[0], config[1], config[2], config[3]);
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ for (i = 0; i < dpe->record_nr; i++, dpse++) {
+ if (dpse->vs_level == (config[0] & 3) &&
+ dpse->pre_level == ((config[0] >> 3) & 3))
+ break;
+ }
+ BUG_ON(i == dpe->record_nr);
+
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ const int shift[4] = { 16, 8, 0, 24 };
+ uint32_t mask = 0xff << shift[i];
+ uint32_t reg0, reg1, reg2;
+
+ reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
+ reg0 |= (dpse->reg0 << shift[i]);
+ reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
+ reg1 |= (dpse->reg1 << shift[i]);
+ reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
+ reg2 |= (dpse->reg2 << 8);
+ nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
+ nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
+ nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
+ }
+
+ ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+bool
+nouveau_dp_link_train(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint8_t config[4];
+ uint8_t status[3];
+ bool cr_done, cr_max_vs, eq_done;
+ int ret = 0, i, tries, voltage;
+
+ NV_DEBUG(dev, "link training!!\n");
+train:
+ cr_done = eq_done = false;
+
+ /* set link configuration */
+ NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
+ nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
+
+ ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
+ if (ret)
+ return false;
+
+ config[0] = nv_encoder->dp.link_nr;
+ if (nv_encoder->dp.dpcd_version >= 0x11)
+ config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = nouveau_dp_lane_count_set(encoder, config[0]);
+ if (ret)
+ return false;
+
+ /* clock recovery */
+ NV_DEBUG(dev, "\tbegin cr\n");
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto stop;
+
+ tries = 0;
+ voltage = -1;
+ memset(config, 0x00, sizeof(config));
+ for (;;) {
+ if (!nouveau_dp_link_train_commit(encoder, config))
+ break;
+
+ udelay(100);
+
+ ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
+ if (ret)
+ break;
+ NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ status[0], status[1]);
+
+ cr_done = true;
+ cr_max_vs = false;
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+ if (!(lane & DP_LANE_CR_DONE)) {
+ cr_done = false;
+				if (config[i] & DP_TRAIN_MAX_SWING_REACHED)
+ cr_max_vs = true;
+ break;
+ }
+ }
+
+ if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
+ voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ tries = 0;
+ }
+
+ if (cr_done || cr_max_vs || (++tries == 5))
+ break;
+
+ if (!nouveau_dp_link_train_adjust(encoder, config))
+ break;
+ }
+
+ if (!cr_done)
+ goto stop;
+
+ /* channel equalisation */
+ NV_DEBUG(dev, "\tbegin eq\n");
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
+ if (ret)
+ goto stop;
+
+ for (tries = 0; tries <= 5; tries++) {
+ udelay(400);
+
+ ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
+ if (ret)
+ break;
+ NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ status[0], status[1]);
+
+ eq_done = true;
+ if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
+ eq_done = false;
+
+ for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
+ int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+ if (!(lane & DP_LANE_CR_DONE)) {
+ cr_done = false;
+ break;
+ }
+
+ if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
+ !(lane & DP_LANE_SYMBOL_LOCKED)) {
+ eq_done = false;
+ break;
+ }
+ }
+
+ if (eq_done || !cr_done)
+ break;
+
+ if (!nouveau_dp_link_train_adjust(encoder, config) ||
+ !nouveau_dp_link_train_commit(encoder, config))
+ break;
+ }
+
+stop:
+ /* end link training */
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
+ if (ret)
+ return false;
+
+ /* retry at a lower setting, if possible */
+	if (!(eq_done && cr_done)) {
+ NV_DEBUG(dev, "\twe failed\n");
+ if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
+ NV_DEBUG(dev, "retry link training at low rate\n");
+ nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+ goto train;
+ }
+ }
+
+ return eq_done;
+}
+
+bool
+nouveau_dp_detect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ uint8_t dpcd[4];
+ int ret;
+
+ ret = auxch_rd(encoder, 0x0000, dpcd, 4);
+ if (ret)
+ return false;
+
+ NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
+ "display: link_bw %d, link_nr %d version 0x%02x\n",
+ nv_encoder->dcb->dpconf.link_bw,
+ nv_encoder->dcb->dpconf.link_nr,
+ dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
+
+ nv_encoder->dp.dpcd_version = dpcd[0];
+
+ nv_encoder->dp.link_bw = dpcd[1];
+ if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
+ !nv_encoder->dcb->dpconf.link_bw)
+ nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+
+ nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+ if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
+ nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+
+ return true;
+}
+
+int
+nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr)
+{
+ struct drm_device *dev = auxch->dev;
+ uint32_t tmp, ctrl, stat = 0, data32[4] = {};
+ int ret = 0, i, index = auxch->rd;
+
+	NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n",
+		 index, cmd, addr, data_nr);
+
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ if (!(tmp & 0x01000000)) {
+ NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
+ ret = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
+ if (tmp & NV50_AUXCH_STAT_STATE_READY)
+ break;
+ udelay(100);
+ }
+
+ if (i == 3) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!(cmd & 1)) {
+ memcpy(data32, data, data_nr);
+ for (i = 0; i < 4; i++) {
+ NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
+ nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
+ }
+ }
+
+ nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
+ ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
+ ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
+ ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
+ ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
+
+ for (;;) {
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
+		if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
+				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
+			/* don't bail with bit 20 still set, clean up at out */
+			ret = -EBUSY;
+			goto out;
+		}
+
+ udelay(400);
+
+ stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
+ if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
+ NV50_AUXCH_STAT_REPLY_AUX_DEFER)
+ break;
+ }
+
+ if (cmd & 1) {
+ for (i = 0; i < 4; i++) {
+ data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
+ NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
+ }
+ memcpy(data, data32, data_nr);
+ }
+
+out:
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ if (tmp & 0x01000000) {
+ NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
+ ret = -EIO;
+ }
+
+ udelay(400);
+
+ return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
+}
+
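+/* I2C-over-AUX for the DP i2c algorithm (i2c_algo_dp_aux): bytes are
+ * tunnelled one at a time through the AUX channel, with MOT set on all
+ * but the final transaction and DEFER replies retried after a delay.
+ */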
+int
+nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
+ struct drm_device *dev = auxch->dev;
+ int ret = 0, cmd, addr = algo_data->address;
+ uint8_t *buf;
+
+ if (mode == MODE_I2C_READ) {
+ cmd = AUX_I2C_READ;
+ buf = read_byte;
+ } else {
+ cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
+ buf = &write_byte;
+ }
+
+ if (!(mode & MODE_I2C_STOP))
+ cmd |= AUX_I2C_MOT;
+
+ if (mode & MODE_I2C_START)
+ return 1;
+
+ for (;;) {
+ ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
+ case NV50_AUXCH_STAT_REPLY_I2C_ACK:
+ return 1;
+ case NV50_AUXCH_STAT_REPLY_I2C_NACK:
+ return -EREMOTEIO;
+ case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
+ udelay(100);
+ break;
+ default:
+ NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
+ return -EREMOTEIO;
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 00000000000..35249c35118
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+#include "nv50_display.h"
+
+#include "drm_pciids.h"
+
+MODULE_PARM_DESC(noagp, "Disable AGP");
+int nouveau_noagp;
+module_param_named(noagp, nouveau_noagp, int, 0400);
+
+MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
+static int nouveau_modeset = -1; /* kms */
+module_param_named(modeset, nouveau_modeset, int, 0400);
+
+MODULE_PARM_DESC(vbios, "Override default VBIOS location");
+char *nouveau_vbios;
+module_param_named(vbios, nouveau_vbios, charp, 0400);
+
+MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
+int nouveau_vram_pushbuf;
+module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+
+MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
+int nouveau_vram_notify;
+module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
+
+MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
+int nouveau_duallink = 1;
+module_param_named(duallink, nouveau_duallink, int, 0400);
+
+MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
+int nouveau_uscript_lvds = -1;
+module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
+
+MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
+int nouveau_uscript_tmds = -1;
+module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
+
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+ "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+ "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+ "\t\tDefault: PAL\n"
+ "\t\t*NOTE* Ignored for cards with external TV encoders.");
+char *nouveau_tv_norm;
+module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
+
+MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
+ "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
+ "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
+ "\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
+int nouveau_reg_debug;
+module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
+
+int nouveau_fbpercrtc;
+#if 0
+module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
+#endif
+
+static struct pci_device_id pciidlist[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static struct drm_driver driver;
+
+static int __devinit
+nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+static void
+nouveau_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int
+nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ struct drm_crtc *crtc;
+ uint32_t fbdev_flags;
+ int ret, i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (pm_state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ fbdev_flags = dev_priv->fbdev_info->flags;
+ dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_unpin(nouveau_fb->nvbo);
+ }
+
+ NV_INFO(dev, "Evicting buffers...\n");
+ ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+
+ NV_INFO(dev, "Idling channels...\n");
+ for (i = 0; i < pfifo->channels; i++) {
+ struct nouveau_fence *fence = NULL;
+
+ chan = dev_priv->fifos[i];
+ if (!chan || (dev_priv->card_type >= NV_50 &&
+ chan == dev_priv->fifos[0]))
+ continue;
+
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret == 0) {
+ ret = nouveau_fence_wait(fence, NULL, false, false);
+ nouveau_fence_unref((void *)&fence);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
+ chan->id);
+ }
+ }
+
+ pgraph->fifo_access(dev, false);
+ nouveau_wait_for_idle(dev);
+ pfifo->reassign(dev, false);
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pgraph->unload_context(dev);
+
+ NV_INFO(dev, "Suspending GPU objects...\n");
+ ret = nouveau_gpuobj_suspend(dev);
+ if (ret) {
+ NV_ERROR(dev, "... failed: %d\n", ret);
+ goto out_abort;
+ }
+
+ ret = pinstmem->suspend(dev);
+ if (ret) {
+ NV_ERROR(dev, "... failed: %d\n", ret);
+ nouveau_gpuobj_suspend_cleanup(dev);
+ goto out_abort;
+ }
+
+ NV_INFO(dev, "And we're gone!\n");
+ pci_save_state(pdev);
+ if (pm_state.event == PM_EVENT_SUSPEND) {
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ acquire_console_sem();
+ fb_set_suspend(dev_priv->fbdev_info, 1);
+ release_console_sem();
+ dev_priv->fbdev_info->flags = fbdev_flags;
+ return 0;
+
+out_abort:
+ NV_INFO(dev, "Re-enabling acceleration..\n");
+ pfifo->enable(dev);
+ pfifo->reassign(dev, true);
+ pgraph->fifo_access(dev, true);
+ return ret;
+}
+
+static int
+nouveau_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ struct drm_crtc *crtc;
+ uint32_t fbdev_flags;
+ int ret, i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ fbdev_flags = dev_priv->fbdev_info->flags;
+ dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+
+ NV_INFO(dev, "We're back, enabling device...\n");
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev))
+ return -1;
+ pci_set_master(dev->pdev);
+
+ NV_INFO(dev, "POSTing device...\n");
+ ret = nouveau_run_vbios_init(dev);
+ if (ret)
+ return ret;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ ret = nouveau_mem_init_agp(dev);
+ if (ret) {
+ NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
+ return ret;
+ }
+ }
+
+ NV_INFO(dev, "Reinitialising engines...\n");
+ engine->instmem.resume(dev);
+ engine->mc.init(dev);
+ engine->timer.init(dev);
+ engine->fb.init(dev);
+ engine->graph.init(dev);
+ engine->fifo.init(dev);
+
+ NV_INFO(dev, "Restoring GPU objects...\n");
+ nouveau_gpuobj_resume(dev);
+
+ nouveau_irq_postinstall(dev);
+
+ /* Re-write SKIPS, they'll have been lost over the suspend */
+ if (nouveau_vram_pushbuf) {
+ struct nouveau_channel *chan;
+ int j;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->fifos[i];
+ if (!chan)
+ continue;
+
+ for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+ nouveau_bo_wr32(chan->pushbuf_bo, j, 0);
+ }
+ }
+
+ NV_INFO(dev, "Restoring mode...\n");
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ }
+
+ if (dev_priv->card_type < NV_50) {
+ nv04_display_restore(dev);
+ NVLockVgaCrtcs(dev, false);
+ } else
+ nv50_display_init(dev);
+
+ /* Force CLUT to get re-loaded during modeset */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->lut.depth = 0;
+ }
+
+ acquire_console_sem();
+ fb_set_suspend(dev_priv->fbdev_info, 0);
+ release_console_sem();
+
+ nouveau_fbcon_zfill(dev);
+
+ drm_helper_resume_force_mode(dev);
+ dev_priv->fbdev_info->flags = fbdev_flags;
+ return 0;
+}
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ .load = nouveau_load,
+ .firstopen = nouveau_firstopen,
+ .lastclose = nouveau_lastclose,
+ .unload = nouveau_unload,
+ .preclose = nouveau_preclose,
+#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+ .debugfs_init = nouveau_debugfs_init,
+ .debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+ .irq_preinstall = nouveau_irq_preinstall,
+ .irq_postinstall = nouveau_irq_postinstall,
+ .irq_uninstall = nouveau_irq_uninstall,
+ .irq_handler = nouveau_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = nouveau_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = nouveau_ttm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = nouveau_pci_probe,
+ .remove = nouveau_pci_remove,
+ .suspend = nouveau_pci_suspend,
+ .resume = nouveau_pci_resume
+ },
+
+ .gem_init_object = nouveau_gem_object_new,
+ .gem_free_object = nouveau_gem_object_del,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+ .date = GIT_REVISION,
+#else
+ .date = DRIVER_DATE,
+#endif
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init nouveau_init(void)
+{
+ driver.num_ioctls = nouveau_max_ioctl;
+
+ if (nouveau_modeset == -1) {
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ nouveau_modeset = 0;
+ else
+#endif
+ nouveau_modeset = 1;
+ }
+
+ if (nouveau_modeset == 1)
+ driver.driver_features |= DRIVER_MODESET;
+
+ return drm_init(&driver);
+}
+
+static void __exit nouveau_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(nouveau_init);
+module_exit(nouveau_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 00000000000..88b4c7b77e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,1286 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
+
+#define DRIVER_AUTHOR "Stephane Marchesin"
+#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
+
+#define DRIVER_NAME "nouveau"
+#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
+#define DRIVER_DATE "20090420"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 15
+
+#define NOUVEAU_FAMILY 0x0000FFFF
+#define NOUVEAU_FLAGS 0xFFFF0000
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+struct nouveau_fpriv {
+ struct ttm_object_file *tfile;
+};
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_bios.h"
+
+#define MAX_NUM_DCB_ENTRIES 16
+
+#define NOUVEAU_MAX_CHANNEL_NR 128
+
+#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
+#define NV50_VM_BLOCK (512*1024*1024ULL)
+#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+
+struct nouveau_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ u32 placements[3];
+ struct ttm_bo_kmap_obj kmap;
+ struct list_head head;
+
+ /* protected by ttm_bo_reserve() */
+ struct drm_file *reserved_by;
+ struct list_head entry;
+ int pbbo_index;
+
+ struct nouveau_channel *channel;
+
+ bool mappable;
+ bool no_vm;
+
+ uint32_t tile_mode;
+ uint32_t tile_flags;
+
+ struct drm_gem_object *gem;
+ struct drm_file *cpu_filp;
+ int pin_refcnt;
+};
+
+static inline struct nouveau_bo *
+nouveau_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct nouveau_bo, bo);
+}
+
+static inline struct nouveau_bo *
+nouveau_gem_object(struct drm_gem_object *gem)
+{
+ return gem ? gem->driver_private : NULL;
+}
+
+/* TODO: submit equivalent to TTM generic API upstream? */
+static inline void __iomem *
+nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
+{
+ bool is_iomem;
+ void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
+ &nvbo->kmap, &is_iomem);
+ WARN_ON_ONCE(ioptr && !is_iomem);
+ return ioptr;
+}
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ uint64_t start;
+ uint64_t size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+enum nouveau_flags {
+ NV_NFORCE = 0x10000000,
+ NV_NFORCE2 = 0x20000000
+};
+
+#define NVOBJ_ENGINE_SW 0
+#define NVOBJ_ENGINE_GR 1
+#define NVOBJ_ENGINE_DISPLAY 2
+#define NVOBJ_ENGINE_INT 0xdeadbeef
+
+#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
+#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
+#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_FAKE (1 << 3)
+struct nouveau_gpuobj {
+ struct list_head list;
+
+ struct nouveau_channel *im_channel;
+ struct mem_block *im_pramin;
+ struct nouveau_bo *im_backing;
+ uint32_t im_backing_start;
+ uint32_t *im_backing_suspend;
+ int im_bound;
+
+ uint32_t flags;
+ int refcount;
+
+ uint32_t engine;
+ uint32_t class;
+
+ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
+ void *priv;
+};
+
+struct nouveau_gpuobj_ref {
+ struct list_head list;
+
+ struct nouveau_gpuobj *gpuobj;
+ uint32_t instance;
+
+ struct nouveau_channel *channel;
+ int handle;
+};
+
+struct nouveau_channel {
+ struct drm_device *dev;
+ int id;
+
+ /* owner of this fifo */
+ struct drm_file *file_priv;
+ /* mapping of the fifo itself */
+ struct drm_local_map *map;
+
+ /* mapping of the regs controlling the fifo */
+ void __iomem *user;
+ uint32_t user_get;
+ uint32_t user_put;
+
+ /* Fencing */
+ struct {
+ /* lock protects the pending list only */
+ spinlock_t lock;
+ struct list_head pending;
+ uint32_t sequence;
+ uint32_t sequence_ack;
+ uint32_t last_sequence_irq;
+ } fence;
+
+ /* DMA push buffer */
+ struct nouveau_gpuobj_ref *pushbuf;
+ struct nouveau_bo *pushbuf_bo;
+ uint32_t pushbuf_base;
+
+ /* Notifier memory */
+ struct nouveau_bo *notifier_bo;
+ struct mem_block *notifier_heap;
+
+ /* PFIFO context */
+ struct nouveau_gpuobj_ref *ramfc;
+ struct nouveau_gpuobj_ref *cache;
+
+ /* PGRAPH context */
+ /* XXX maybe merge the two pointers as private data? */
+ struct nouveau_gpuobj_ref *ramin_grctx;
+ void *pgraph_ctx;
+
+ /* NV50 VM */
+ struct nouveau_gpuobj *vm_pd;
+ struct nouveau_gpuobj_ref *vm_gart_pt;
+ struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+
+ /* Objects */
+ struct nouveau_gpuobj_ref *ramin; /* Private instmem */
+ struct mem_block *ramin_heap; /* Private PRAMIN heap */
+ struct nouveau_gpuobj_ref *ramht; /* Hash table */
+ struct list_head ramht_refs; /* Objects referenced by RAMHT */
+
+ /* GPU object info for stuff used in-kernel (mm_enabled) */
+ uint32_t m2mf_ntfy;
+ uint32_t vram_handle;
+ uint32_t gart_handle;
+ bool accel_done;
+
+ /* Push buffer state (only for drm's channel on !mm_enabled) */
+ struct {
+ int max;
+ int free;
+ int cur;
+ int put;
+ /* access via pushbuf_bo */
+ } dma;
+
+ uint32_t sw_subchannel[8];
+
+ struct {
+ struct nouveau_gpuobj *vblsem;
+ uint32_t vblsem_offset;
+ uint32_t vblsem_rval;
+ struct list_head vbl_wait;
+ } nvsw;
+
+ struct {
+ bool active;
+ char name[32];
+ struct drm_info_list info;
+ } debugfs;
+};
+
+struct nouveau_instmem_engine {
+ void *priv;
+
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ int (*suspend)(struct drm_device *dev);
+ void (*resume)(struct drm_device *dev);
+
+ int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+ void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+ void (*prepare_access)(struct drm_device *, bool write);
+ void (*finish_access)(struct drm_device *);
+};
+
+struct nouveau_mc_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_timer_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ uint64_t (*read)(struct drm_device *dev);
+};
+
+struct nouveau_fb_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_fifo_engine {
+ void *priv;
+
+ int channels;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ void (*disable)(struct drm_device *);
+ void (*enable)(struct drm_device *);
+ bool (*reassign)(struct drm_device *, bool enable);
+
+ int (*channel_id)(struct drm_device *);
+
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*unload_context)(struct drm_device *);
+};
+
+struct nouveau_pgraph_object_method {
+ int id;
+ int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
+ uint32_t data);
+};
+
+struct nouveau_pgraph_object_class {
+ int id;
+ bool software;
+ struct nouveau_pgraph_object_method *methods;
+};
+
+struct nouveau_pgraph_engine {
+ struct nouveau_pgraph_object_class *grclass;
+ bool accel_blocked;
+ void *ctxprog;
+ void *ctxvals;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ void (*fifo_access)(struct drm_device *, bool);
+
+ struct nouveau_channel *(*channel)(struct drm_device *);
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*unload_context)(struct drm_device *);
+};
+
+struct nouveau_engine {
+ struct nouveau_instmem_engine instmem;
+ struct nouveau_mc_engine mc;
+ struct nouveau_timer_engine timer;
+ struct nouveau_fb_engine fb;
+ struct nouveau_pgraph_engine graph;
+ struct nouveau_fifo_engine fifo;
+};
+
+struct nouveau_pll_vals {
+ union {
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t N1, M1, N2, M2;
+#else
+ uint8_t M1, N1, M2, N2;
+#endif
+ };
+ struct {
+ uint16_t NM1, NM2;
+ } __attribute__((packed));
+ };
+ int log2P;
+
+ int refclk;
+};
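+/*
+ * Sketch (assuming the usual two-stage NV PLL layout): the output clock
+ * these values describe is roughly
+ *
+ *	clk = (refclk * N1 * N2) / (M1 * M2) >> log2P
+ *
+ * with the second (N2/M2) stage effectively 1/1 on single-stage PLLs.
+ */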
+
+enum nv04_fp_display_regs {
+ FP_DISPLAY_END,
+ FP_TOTAL,
+ FP_CRTC,
+ FP_SYNC_START,
+ FP_SYNC_END,
+ FP_VALID_START,
+ FP_VALID_END
+};
+
+struct nv04_crtc_reg {
+ unsigned char MiscOutReg;
+ uint8_t CRTC[0x9f];
+ uint8_t CR58[0x10];
+ uint8_t Sequencer[5];
+ uint8_t Graphics[9];
+ uint8_t Attribute[21];
+ unsigned char DAC[768]; /* internal color lookup table */
+
+ /* PCRTC regs */
+ uint32_t fb_start;
+ uint32_t crtc_cfg;
+ uint32_t cursor_cfg;
+ uint32_t gpio_ext;
+ uint32_t crtc_830;
+ uint32_t crtc_834;
+ uint32_t crtc_850;
+ uint32_t crtc_eng_ctrl;
+
+ /* PRAMDAC regs */
+ uint32_t nv10_cursync;
+ struct nouveau_pll_vals pllvals;
+ uint32_t ramdac_gen_ctrl;
+ uint32_t ramdac_630;
+ uint32_t ramdac_634;
+ uint32_t tv_setup;
+ uint32_t tv_vtotal;
+ uint32_t tv_vskew;
+ uint32_t tv_vsync_delay;
+ uint32_t tv_htotal;
+ uint32_t tv_hskew;
+ uint32_t tv_hsync_delay;
+ uint32_t tv_hsync_delay2;
+ uint32_t fp_horiz_regs[7];
+ uint32_t fp_vert_regs[7];
+ uint32_t dither;
+ uint32_t fp_control;
+ uint32_t dither_regs[6];
+ uint32_t fp_debug_0;
+ uint32_t fp_debug_1;
+ uint32_t fp_debug_2;
+ uint32_t fp_margin_color;
+ uint32_t ramdac_8c0;
+ uint32_t ramdac_a20;
+ uint32_t ramdac_a24;
+ uint32_t ramdac_a34;
+ uint32_t ctv_regs[38];
+};
+
+struct nv04_output_reg {
+ uint32_t output;
+ int head;
+};
+
+struct nv04_mode_state {
+ uint32_t bpp;
+ uint32_t width;
+ uint32_t height;
+ uint32_t interlace;
+ uint32_t repaint0;
+ uint32_t repaint1;
+ uint32_t screen;
+ uint32_t scale;
+ uint32_t dither;
+ uint32_t extra;
+ uint32_t fifo;
+ uint32_t pixel;
+ uint32_t horiz;
+ int arbitration0;
+ int arbitration1;
+ uint32_t pll;
+ uint32_t pllB;
+ uint32_t vpll;
+ uint32_t vpll2;
+ uint32_t vpllB;
+ uint32_t vpll2B;
+ uint32_t pllsel;
+ uint32_t sel_clk;
+ uint32_t general;
+ uint32_t crtcOwner;
+ uint32_t head;
+ uint32_t head2;
+ uint32_t cursorConfig;
+ uint32_t cursor0;
+ uint32_t cursor1;
+ uint32_t cursor2;
+ uint32_t timingH;
+ uint32_t timingV;
+ uint32_t displayV;
+ uint32_t crtcSync;
+
+ struct nv04_crtc_reg crtc_reg[2];
+};
+
+enum nouveau_card_type {
+ NV_04 = 0x00,
+ NV_10 = 0x10,
+ NV_20 = 0x20,
+ NV_30 = 0x30,
+ NV_40 = 0x40,
+ NV_50 = 0x50,
+};
+
+struct drm_nouveau_private {
+ struct drm_device *dev;
+ enum {
+ NOUVEAU_CARD_INIT_DOWN,
+ NOUVEAU_CARD_INIT_DONE,
+ NOUVEAU_CARD_INIT_FAILED
+ } init_state;
+
+ /* the card type, takes NV_* as values */
+ enum nouveau_card_type card_type;
+ /* exact chipset, derived from NV_PMC_BOOT_0 */
+ int chipset;
+ int flags;
+
+ void __iomem *mmio;
+ void __iomem *ramin;
+ uint32_t ramin_size;
+
+ struct workqueue_struct *wq;
+ struct work_struct irq_work;
+
+ struct list_head vbl_waiting;
+
+ struct {
+ struct ttm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ spinlock_t bo_list_lock;
+ struct list_head bo_list;
+ atomic_t validate_sequence;
+ } ttm;
+
+ struct fb_info *fbdev_info;
+
+ int fifo_alloc_count;
+ struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+
+ struct nouveau_engine engine;
+ struct nouveau_channel *channel;
+
+ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
+ struct nouveau_gpuobj *ramht;
+ uint32_t ramin_rsvd_vram;
+ uint32_t ramht_offset;
+ uint32_t ramht_size;
+ uint32_t ramht_bits;
+ uint32_t ramfc_offset;
+ uint32_t ramfc_size;
+ uint32_t ramro_offset;
+ uint32_t ramro_size;
+
+ /* base physical addresses */
+ uint64_t fb_phys;
+ uint64_t fb_available_size;
+ uint64_t fb_mappable_pages;
+ uint64_t fb_aper_free;
+
+ struct {
+ enum {
+ NOUVEAU_GART_NONE = 0,
+ NOUVEAU_GART_AGP,
+ NOUVEAU_GART_SGDMA
+ } type;
+ uint64_t aper_base;
+ uint64_t aper_size;
+ uint64_t aper_free;
+
+ struct nouveau_gpuobj *sg_ctxdma;
+ struct page *sg_dummy_page;
+ dma_addr_t sg_dummy_bus;
+
+ /* nottm hack */
+ struct drm_ttm_backend *sg_be;
+ unsigned long sg_handle;
+ } gart_info;
+
+ /* G8x/G9x virtual address space */
+ uint64_t vm_gart_base;
+ uint64_t vm_gart_size;
+ uint64_t vm_vram_base;
+ uint64_t vm_vram_size;
+ uint64_t vm_end;
+ struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
+ int vm_vram_pt_nr;
+
+ /* the mtrr covering the FB */
+ int fb_mtrr;
+
+ struct mem_block *ramin_heap;
+
+ /* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
+ uint32_t ctx_table_size;
+ struct nouveau_gpuobj_ref *ctx_table;
+
+ struct list_head gpuobj_list;
+
+ struct nvbios VBIOS;
+ struct nouveau_bios_info *vbios;
+
+ struct nv04_mode_state mode_reg;
+ struct nv04_mode_state saved_reg;
+ uint32_t saved_vga_font[4][16384];
+ uint32_t crtc_owner;
+ uint32_t dac_users[4];
+
+ struct nouveau_suspend_resume {
+ uint32_t fifo_mode;
+ uint32_t graph_ctx_control;
+ uint32_t graph_state;
+ uint32_t *ramin_copy;
+ uint64_t ramin_size;
+ } susres;
+
+ struct backlight_device *backlight;
+ bool acpi_dsm;
+
+ struct nouveau_channel *evo;
+
+ struct {
+ struct dentry *channel_root;
+ } debugfs;
+};
+
+static inline struct drm_nouveau_private *
+nouveau_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct drm_nouveau_private, ttm.bdev);
+}
+
+static inline int
+nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *prev;
+
+ if (!pnvbo)
+ return -EINVAL;
+ prev = *pnvbo;
+
+ *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
+ if (prev) {
+ struct ttm_buffer_object *bo = &prev->bo;
+
+ ttm_bo_unref(&bo);
+ }
+
+ return 0;
+}
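+/*
+ * Usage sketch: the same helper both takes and drops references, so a
+ * long-lived pointer is managed symmetrically:
+ *
+ *	nouveau_bo_ref(nvbo, &chan->pushbuf_bo);	// take a reference
+ *	nouveau_bo_ref(NULL, &chan->pushbuf_bo);	// drop it again
+ */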
+
+#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
+ NV_ERROR(dev, "called without init\n"); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (!nouveau_channel_owner(dev, (cl), (id))) { \
+ NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
+ DRM_CURRENTPID, (id)); \
+ return -EPERM; \
+ } \
+ (ch) = nv->fifos[(id)]; \
+} while (0)
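+/*
+ * Typical ioctl prologue (sketch; the request field name is
+ * illustrative):
+ *
+ *	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ *	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+ *
+ * Both macros expect a "dev" in scope and return from the caller on
+ * failure.
+ */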
+
+/* nouveau_drv.c */
+extern int nouveau_noagp;
+extern int nouveau_duallink;
+extern int nouveau_uscript_lvds;
+extern int nouveau_uscript_tmds;
+extern int nouveau_vram_pushbuf;
+extern int nouveau_vram_notify;
+extern int nouveau_fbpercrtc;
+extern char *nouveau_tv_norm;
+extern int nouveau_reg_debug;
+extern char *nouveau_vbios;
+
+/* nouveau_state.c */
+extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern int nouveau_load(struct drm_device *, unsigned long flags);
+extern int nouveau_firstopen(struct drm_device *);
+extern void nouveau_lastclose(struct drm_device *);
+extern int nouveau_unload(struct drm_device *);
+extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_for_idle(struct drm_device *);
+extern int nouveau_card_init(struct drm_device *);
+extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_resume(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_mem.c */
+extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
+ uint64_t size);
+extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
+ uint64_t size, int align2,
+ struct drm_file *, int tail);
+extern void nouveau_mem_takedown(struct mem_block **heap);
+extern void nouveau_mem_free_block(struct mem_block *);
+extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
+extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
+extern int nouveau_mem_init(struct drm_device *);
+extern int nouveau_mem_init_agp(struct drm_device *);
+extern void nouveau_mem_close(struct drm_device *);
+extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
+ uint32_t size, uint32_t flags,
+ uint64_t phys);
+extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
+ uint32_t size);
+
+/* nouveau_notifier.c */
+extern int nouveau_notifier_init_channel(struct nouveau_channel *);
+extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
+extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
+ int count, uint32_t *offset);
+extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
+extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_channel.c */
+extern struct drm_ioctl_desc nouveau_ioctls[];
+extern int nouveau_max_ioctl;
+extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
+extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
+ int channel);
+extern int nouveau_channel_alloc(struct drm_device *dev,
+ struct nouveau_channel **chan,
+ struct drm_file *file_priv,
+ uint32_t fb_ctxdma, uint32_t tt_ctxdma);
+extern void nouveau_channel_free(struct nouveau_channel *);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
+
+/* nouveau_object.c */
+extern int nouveau_gpuobj_early_init(struct drm_device *);
+extern int nouveau_gpuobj_init(struct drm_device *);
+extern void nouveau_gpuobj_takedown(struct drm_device *);
+extern void nouveau_gpuobj_late_takedown(struct drm_device *);
+extern int nouveau_gpuobj_suspend(struct drm_device *dev);
+extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
+extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
+ uint32_t vram_h, uint32_t tt_h);
+extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
+extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
+ uint32_t size, int align, uint32_t flags,
+ struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
+ uint32_t handle, struct nouveau_gpuobj *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_del(struct drm_device *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret);
+extern int nouveau_gpuobj_new_ref(struct drm_device *,
+ struct nouveau_channel *alloc_chan,
+ struct nouveau_channel *ref_chan,
+ uint32_t handle, uint32_t size, int align,
+ uint32_t flags, struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *,
+ uint32_t p_offset, uint32_t b_offset,
+ uint32_t size, uint32_t flags,
+ struct nouveau_gpuobj **,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
+ uint64_t offset, uint64_t size,
+ int access, struct nouveau_gpuobj **,
+ uint32_t *o_ret);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
+extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_irq.c */
+extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_preinstall(struct drm_device *);
+extern int nouveau_irq_postinstall(struct drm_device *);
+extern void nouveau_irq_uninstall(struct drm_device *);
+
+/* nouveau_sgdma.c */
+extern int nouveau_sgdma_init(struct drm_device *);
+extern void nouveau_sgdma_takedown(struct drm_device *);
+extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
+ uint32_t *page);
+extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+
+/* nouveau_debugfs.c */
+#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+extern int nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
+extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+static inline int
+nouveau_debugfs_channel_init(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+static inline void
+nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+{
+}
+#endif
+
+/* nouveau_dma.c */
+extern int nouveau_dma_init(struct nouveau_channel *);
+extern int nouveau_dma_wait(struct nouveau_channel *, int size);
+
+/* nouveau_acpi.c */
+#ifdef CONFIG_ACPI
+extern int nouveau_hybrid_setup(struct drm_device *dev);
+extern bool nouveau_dsm_probe(struct drm_device *dev);
+#else
+static inline int nouveau_hybrid_setup(struct drm_device *dev)
+{
+ return 0;
+}
+static inline bool nouveau_dsm_probe(struct drm_device *dev)
+{
+ return false;
+}
+#endif
+
+/* nouveau_backlight.c */
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
+#else
+static inline int nouveau_backlight_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void nouveau_backlight_exit(struct drm_device *dev) { }
+#endif
+
+/* nouveau_bios.c */
+extern int nouveau_bios_init(struct drm_device *);
+extern void nouveau_bios_takedown(struct drm_device *dev);
+extern int nouveau_run_vbios_init(struct drm_device *);
+extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
+ struct dcb_entry *);
+extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
+ enum dcb_gpio_tag);
+extern struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *, int index);
+extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
+ struct pll_lims *);
+extern int nouveau_bios_run_display_table(struct drm_device *,
+ struct dcb_entry *,
+ uint32_t script, int pxclk);
+extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
+ int *length);
+extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
+extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
+extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
+ bool *dl, bool *if_is_24bit);
+extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
+ int head, int pxclk);
+extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
+ enum LVDS_script, int pxclk);
+
+/* nouveau_ttm.c */
+int nouveau_ttm_global_init(struct drm_nouveau_private *);
+void nouveau_ttm_global_release(struct drm_nouveau_private *);
+int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+
+/* nouveau_dp.c */
+int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr);
+bool nouveau_dp_detect(struct drm_encoder *);
+bool nouveau_dp_link_train(struct drm_encoder *);
+
+/* nv04_fb.c */
+extern int nv04_fb_init(struct drm_device *);
+extern void nv04_fb_takedown(struct drm_device *);
+
+/* nv10_fb.c */
+extern int nv10_fb_init(struct drm_device *);
+extern void nv10_fb_takedown(struct drm_device *);
+
+/* nv40_fb.c */
+extern int nv40_fb_init(struct drm_device *);
+extern void nv40_fb_takedown(struct drm_device *);
+
+/* nv04_fifo.c */
+extern int nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_disable(struct drm_device *);
+extern void nv04_fifo_enable(struct drm_device *);
+extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern int nv04_fifo_channel_id(struct drm_device *);
+extern int nv04_fifo_create_context(struct nouveau_channel *);
+extern void nv04_fifo_destroy_context(struct nouveau_channel *);
+extern int nv04_fifo_load_context(struct nouveau_channel *);
+extern int nv04_fifo_unload_context(struct drm_device *);
+
+/* nv10_fifo.c */
+extern int nv10_fifo_init(struct drm_device *);
+extern int nv10_fifo_channel_id(struct drm_device *);
+extern int nv10_fifo_create_context(struct nouveau_channel *);
+extern void nv10_fifo_destroy_context(struct nouveau_channel *);
+extern int nv10_fifo_load_context(struct nouveau_channel *);
+extern int nv10_fifo_unload_context(struct drm_device *);
+
+/* nv40_fifo.c */
+extern int nv40_fifo_init(struct drm_device *);
+extern int nv40_fifo_create_context(struct nouveau_channel *);
+extern void nv40_fifo_destroy_context(struct nouveau_channel *);
+extern int nv40_fifo_load_context(struct nouveau_channel *);
+extern int nv40_fifo_unload_context(struct drm_device *);
+
+/* nv50_fifo.c */
+extern int nv50_fifo_init(struct drm_device *);
+extern void nv50_fifo_takedown(struct drm_device *);
+extern int nv50_fifo_channel_id(struct drm_device *);
+extern int nv50_fifo_create_context(struct nouveau_channel *);
+extern void nv50_fifo_destroy_context(struct nouveau_channel *);
+extern int nv50_fifo_load_context(struct nouveau_channel *);
+extern int nv50_fifo_unload_context(struct drm_device *);
+
+/* nv04_graph.c */
+extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
+extern int nv04_graph_init(struct drm_device *);
+extern void nv04_graph_takedown(struct drm_device *);
+extern void nv04_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
+extern int nv04_graph_create_context(struct nouveau_channel *);
+extern void nv04_graph_destroy_context(struct nouveau_channel *);
+extern int nv04_graph_load_context(struct nouveau_channel *);
+extern int nv04_graph_unload_context(struct drm_device *);
+extern void nv04_graph_context_switch(struct drm_device *);
+
+/* nv10_graph.c */
+extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
+extern int nv10_graph_init(struct drm_device *);
+extern void nv10_graph_takedown(struct drm_device *);
+extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
+extern int nv10_graph_create_context(struct nouveau_channel *);
+extern void nv10_graph_destroy_context(struct nouveau_channel *);
+extern int nv10_graph_load_context(struct nouveau_channel *);
+extern int nv10_graph_unload_context(struct drm_device *);
+extern void nv10_graph_context_switch(struct drm_device *);
+
+/* nv20_graph.c */
+extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
+extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
+extern int nv20_graph_create_context(struct nouveau_channel *);
+extern void nv20_graph_destroy_context(struct nouveau_channel *);
+extern int nv20_graph_load_context(struct nouveau_channel *);
+extern int nv20_graph_unload_context(struct drm_device *);
+extern int nv20_graph_init(struct drm_device *);
+extern void nv20_graph_takedown(struct drm_device *);
+extern int nv30_graph_init(struct drm_device *);
+
+/* nv40_graph.c */
+extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
+extern int nv40_graph_init(struct drm_device *);
+extern void nv40_graph_takedown(struct drm_device *);
+extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
+extern int nv40_graph_create_context(struct nouveau_channel *);
+extern void nv40_graph_destroy_context(struct nouveau_channel *);
+extern int nv40_graph_load_context(struct nouveau_channel *);
+extern int nv40_graph_unload_context(struct drm_device *);
+extern int nv40_grctx_init(struct drm_device *);
+extern void nv40_grctx_fini(struct drm_device *);
+extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
+
+/* nv50_graph.c */
+extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
+extern int nv50_graph_init(struct drm_device *);
+extern void nv50_graph_takedown(struct drm_device *);
+extern void nv50_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
+extern int nv50_graph_create_context(struct nouveau_channel *);
+extern void nv50_graph_destroy_context(struct nouveau_channel *);
+extern int nv50_graph_load_context(struct nouveau_channel *);
+extern int nv50_graph_unload_context(struct drm_device *);
+extern void nv50_graph_context_switch(struct drm_device *);
+
+/* nv04_instmem.c */
+extern int nv04_instmem_init(struct drm_device *);
+extern void nv04_instmem_takedown(struct drm_device *);
+extern int nv04_instmem_suspend(struct drm_device *);
+extern void nv04_instmem_resume(struct drm_device *);
+extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
+extern void nv04_instmem_finish_access(struct drm_device *);
+
+/* nv50_instmem.c */
+extern int nv50_instmem_init(struct drm_device *);
+extern void nv50_instmem_takedown(struct drm_device *);
+extern int nv50_instmem_suspend(struct drm_device *);
+extern void nv50_instmem_resume(struct drm_device *);
+extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
+extern void nv50_instmem_finish_access(struct drm_device *);
+
+/* nv04_mc.c */
+extern int nv04_mc_init(struct drm_device *);
+extern void nv04_mc_takedown(struct drm_device *);
+
+/* nv40_mc.c */
+extern int nv40_mc_init(struct drm_device *);
+extern void nv40_mc_takedown(struct drm_device *);
+
+/* nv50_mc.c */
+extern int nv50_mc_init(struct drm_device *);
+extern void nv50_mc_takedown(struct drm_device *);
+
+/* nv04_timer.c */
+extern int nv04_timer_init(struct drm_device *);
+extern uint64_t nv04_timer_read(struct drm_device *);
+extern void nv04_timer_takedown(struct drm_device *);
+
+extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+/* nv04_dac.c */
+extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern int nv04_dac_output_offset(struct drm_encoder *encoder);
+extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+
+/* nv04_dfp.c */
+extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
+extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+ int head, bool dl);
+extern void nv04_dfp_disable(struct drm_device *dev, int head);
+extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
+
+/* nv04_tv.c */
+extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
+extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+
+/* nv17_tv.c */
+extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ uint32_t pin_mask);
+
+/* nv04_display.c */
+extern int nv04_display_create(struct drm_device *);
+extern void nv04_display_destroy(struct drm_device *);
+extern void nv04_display_restore(struct drm_device *);
+
+/* nv04_crtc.c */
+extern int nv04_crtc_create(struct drm_device *, int index);
+
+/* nouveau_bo.c */
+extern struct ttm_bo_driver nouveau_bo_driver;
+extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ uint32_t tile_mode, uint32_t tile_flags,
+ bool no_vm, bool mappable, struct nouveau_bo **);
+extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
+extern int nouveau_bo_unpin(struct nouveau_bo *);
+extern int nouveau_bo_map(struct nouveau_bo *);
+extern void nouveau_bo_unmap(struct nouveau_bo *);
+extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
+extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
+extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
+extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
+extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+
+/* nouveau_fence.c */
+struct nouveau_fence;
+extern int nouveau_fence_init(struct nouveau_channel *);
+extern void nouveau_fence_fini(struct nouveau_channel *);
+extern void nouveau_fence_update(struct nouveau_channel *);
+extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
+ bool emit);
+extern int nouveau_fence_emit(struct nouveau_fence *);
+struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
+extern bool nouveau_fence_signalled(void *obj, void *arg);
+extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_flush(void *obj, void *arg);
+extern void nouveau_fence_unref(void **obj);
+extern void *nouveau_fence_ref(void *obj);
+extern void nouveau_fence_handler(struct drm_device *dev, int channel);
+
+/* nouveau_gem.c */
+extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ uint32_t tile_mode, uint32_t tile_flags,
+ bool no_vm, bool mappable, struct nouveau_bo **);
+extern int nouveau_gem_object_new(struct drm_gem_object *);
+extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
+ struct drm_file *);
+
+/* nv17_gpio.c */
+int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+
+#ifndef ioread32_native
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else /* def __BIG_ENDIAN */
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif /* def __BIG_ENDIAN else */
+#endif /* !ioread32_native */
+
+/* channel control reg access */
+static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
+{
+ return ioread32_native(chan->user + reg);
+}
+
+static inline void nvchan_wr32(struct nouveau_channel *chan,
+ unsigned reg, u32 val)
+{
+ iowrite32_native(val, chan->user + reg);
+}
+
+/* register access */
+static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return ioread32_native(dev_priv->mmio + reg);
+}
+
+static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ iowrite32_native(val, dev_priv->mmio + reg);
+}
+
+static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return ioread8(dev_priv->mmio + reg);
+}
+
+static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ iowrite8(val, dev_priv->mmio + reg);
+}
+
+#define nv_wait(reg, mask, val) \
+ nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
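+/*
+ * Example (sketch, using the caller's "dev" as the macro expects): wait
+ * up to the 2s timeout for PGRAPH to report idle:
+ *
+ *	if (!nv_wait(NV04_PGRAPH_STATUS, ~0, 0))
+ *		NV_ERROR(dev, "PGRAPH failed to idle\n");
+ */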
+
+/* PRAMIN access */
+static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return ioread32_native(dev_priv->ramin + offset);
+}
+
+static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ iowrite32_native(val, dev_priv->ramin + offset);
+}
+
+/* object access */
+static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
+ unsigned index)
+{
+ return nv_ri32(dev, obj->im_pramin->start + index * 4);
+}
+
+static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
+ unsigned index, u32 val)
+{
+ nv_wi32(dev, obj->im_pramin->start + index * 4, val);
+}
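+/*
+ * Note that the index is in 32-bit words from the start of the object's
+ * PRAMIN allocation; nv_wo32(dev, gpuobj, 1, val) writes the object's
+ * second dword.
+ */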
+
+/*
+ * Logging
+ * Argument d is (struct drm_device *).
+ */
+#define NV_PRINTK(level, d, fmt, arg...) \
+ printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
+ pci_name(d->pdev), ##arg)
+#ifndef NV_DEBUG_NOTRACE
+#define NV_DEBUG(d, fmt, arg...) do { \
+ if (drm_debug) { \
+ NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
+ __LINE__, ##arg); \
+ } \
+} while (0)
+#else
+#define NV_DEBUG(d, fmt, arg...) do { \
+ if (drm_debug) \
+ NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
+} while (0)
+#endif
+#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
+#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
+#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+
+/* nouveau_reg_debug bitmask */
+enum {
+ NOUVEAU_REG_DEBUG_MC = 0x1,
+ NOUVEAU_REG_DEBUG_VIDEO = 0x2,
+ NOUVEAU_REG_DEBUG_FB = 0x4,
+ NOUVEAU_REG_DEBUG_EXTDEV = 0x8,
+ NOUVEAU_REG_DEBUG_CRTC = 0x10,
+ NOUVEAU_REG_DEBUG_RAMDAC = 0x20,
+ NOUVEAU_REG_DEBUG_VGACRTC = 0x40,
+ NOUVEAU_REG_DEBUG_RMVIO = 0x80,
+ NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
+ NOUVEAU_REG_DEBUG_EVO = 0x200,
+};
+
+#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
+ NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
+} while (0)
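+/*
+ * Example (sketch): this prints only when the reg_debug module option
+ * has NOUVEAU_REG_DEBUG_CRTC (0x10) set:
+ *
+ *	NV_REG_DEBUG(CRTC, dev, "index 0x%02x -> 0x%02x\n", index, value);
+ */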
+
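+/*
+ * True on everything NV10 and up apart from the known single-head
+ * implementations: NV10 (0x0100), NV15 (0x0150), NV1A (0x01a0) and
+ * NV20 (0x0200).
+ */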
+static inline bool
+nv_two_heads(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const int impl = dev->pci_device & 0x0ff0;
+
+ if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
+ impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
+ return true;
+
+ return false;
+}
+
+static inline bool
+nv_gf4_disp_arch(struct drm_device *dev)
+{
+ return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+}
+
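+/*
+ * The 0x0310/0x0340 NV30-family implementations and everything from
+ * NV40 on split each VPLL across two registers.
+ */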
+static inline bool
+nv_two_reg_pll(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const int impl = dev->pci_device & 0x0ff0;
+
+ if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
+ return true;
+ return false;
+}
+
+#define NV50_NVSW 0x0000506e
+#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
+#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
+#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
+#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
+#define NV50_NVSW_DMA_VBLSEM 0x0000018c
+#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
+#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
+#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
+
+#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 00000000000..bc4a24029ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_ENCODER_H__
+#define __NOUVEAU_ENCODER_H__
+
+#include "drm_encoder_slave.h"
+#include "nouveau_drv.h"
+
+#define NV_DPMS_CLEARED 0x80
+
+struct nouveau_encoder {
+ struct drm_encoder_slave base;
+
+ struct dcb_entry *dcb;
+ int or;
+
+ struct drm_display_mode mode;
+ int last_dpms;
+
+ struct nv04_output_reg restore;
+
+ void (*disconnect)(struct nouveau_encoder *encoder);
+
+ union {
+ struct {
+ int dpcd_version;
+ int link_nr;
+ int link_bw;
+ } dp;
+ };
+};
+
+static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
+{
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+ return container_of(slave, struct nouveau_encoder, base);
+}
+
+static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
+{
+ return &enc->base.base;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
+int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+
+struct bit_displayport_encoder_table {
+ uint32_t match;
+ uint8_t record_nr;
+ uint8_t unknown;
+ uint16_t script0;
+ uint16_t script1;
+ uint16_t unknown_table;
+} __attribute__ ((packed));
+
+struct bit_displayport_encoder_table_entry {
+ uint8_t vs_level;
+ uint8_t pre_level;
+ uint8_t reg0;
+ uint8_t reg1;
+ uint8_t reg2;
+} __attribute__ ((packed));
+
+#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
new file mode 100644
index 00000000000..4a3f31aa194
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FB_H__
+#define __NOUVEAU_FB_H__
+
+struct nouveau_framebuffer {
+ struct drm_framebuffer base;
+ struct nouveau_bo *nvbo;
+};
+
+static inline struct nouveau_framebuffer *
+nouveau_framebuffer(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct nouveau_framebuffer, base);
+}
+
+extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
+
+struct drm_framebuffer *
+nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
+ struct drm_mode_fb_cmd *);
+
+#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 00000000000..36e8c5e4503
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/screen_info.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_fbcon_sync(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret, i;
+
+ if (!chan->accel_done ||
+ info->state != FBINFO_STATE_RUNNING ||
+ info->flags & FBINFO_HWACCEL_DISABLED)
+ return 0;
+
+ if (RING_SPACE(chan, 4)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ return 0;
+ }
+
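+ /* Queue a notify request, arm dword 3 of the M2MF notifier with a
+ * nonzero sentinel, kick the ring, then poll (up to ~100ms) until
+ * the GPU clears it, at which point all prior commands are done.
+ */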
+ BEGIN_RING(chan, 0, 0x0104, 1);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, 0, 0x0100, 1);
+ OUT_RING(chan, 0);
+ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+ FIRE_RING(chan);
+
+ ret = -EBUSY;
+ for (i = 0; i < 100000; i++) {
+ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+ ret = 0;
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ return 0;
+ }
+
+ chan->accel_done = false;
+ return 0;
+}
+
+static struct fb_ops nouveau_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->lut.r[regno] = red;
+ nv_crtc->lut.g[regno] = green;
+ nv_crtc->lut.b[regno] = blue;
+}
+
+static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ *red = nv_crtc->lut.r[regno];
+ *green = nv_crtc->lut.g[regno];
+ *blue = nv_crtc->lut.b[regno];
+}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get
+};
+
+#if defined(__i386__) || defined(__x86_64__)
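+/*
+ * Check whether the firmware framebuffer (vesafb/efifb) lives inside
+ * one of our BARs: first BAR1 (VRAM), then the RAMIN BAR (2 or 3), so
+ * we know the console is ours to take over.
+ */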
+static bool
+nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int ramin;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
+ screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return false;
+
+ if (screen_info.lfb_base < pci_resource_start(pdev, 1))
+ goto not_fb;
+
+ if (screen_info.lfb_base + screen_info.lfb_size >=
+ pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
+ goto not_fb;
+
+ return true;
+not_fb:
+ ramin = 2;
+ if (pci_resource_len(pdev, ramin) == 0) {
+ ramin = 3;
+ if (pci_resource_len(pdev, ramin) == 0)
+ return false;
+ }
+
+ if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
+ return false;
+
+ if (screen_info.lfb_base + screen_info.lfb_size >=
+ pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
+ return false;
+
+ return true;
+}
+#endif
+
+void
+nouveau_fbcon_zfill(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_fillrect rect;
+
+ /* Clear the entire fbcon. The drm will program every connector
+ * with its preferred mode. If the sizes differ, one display will
+ * quite likely have garbage around the console.
+ */
+ rect.dx = rect.dy = 0;
+ rect.width = info->var.xres_virtual;
+ rect.height = info->var.yres_virtual;
+ rect.color = 0;
+ rect.rop = ROP_COPY;
+ info->fbops->fb_fillrect(info, &rect);
+}
+
+static int
+nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
+ uint32_t fb_height, uint32_t surface_width,
+ uint32_t surface_height, uint32_t surface_depth,
+ uint32_t surface_bpp, struct drm_framebuffer **pfb)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct nouveau_fbcon_par *par;
+ struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_bo *nvbo;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ int size, ret;
+
+ mode_cmd.width = surface_width;
+ mode_cmd.height = surface_height;
+
+ mode_cmd.bpp = surface_bpp;
+ mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
+ mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+ mode_cmd.depth = surface_depth;
+
+ size = mode_cmd.pitch * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
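+ /* e.g. (illustrative figures) a 1280x1024 32bpp surface: pitch =
+ * 1280 * 4 = 5120 bytes, already 256-byte aligned; size = 5120 *
+ * 1024 = 5242880 bytes, a whole number of pages. */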
+
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nvbo);
+ if (ret) {
+ NV_ERROR(dev, "failed to allocate framebuffer\n");
+ goto out;
+ }
+
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "failed to pin fb: %d\n", ret);
+ nouveau_bo_ref(NULL, &nvbo);
+ goto out;
+ }
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ NV_ERROR(dev, "failed to map fb: %d\n", ret);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_ref(NULL, &nvbo);
+ goto out;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
+ if (!fb) {
+ ret = -ENOMEM;
+ NV_ERROR(dev, "failed to allocate fb.\n");
+ goto out_unref;
+ }
+
+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
+
+ nouveau_fb = nouveau_framebuffer(fb);
+ *pfb = fb;
+
+ info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ par = info->par;
+ par->helper.funcs = &nouveau_fbcon_helper_funcs;
+ par->helper.dev = dev;
+ ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
+ if (ret)
+ goto out_unref;
+ dev_priv->fbdev_info = info;
+
+ strcpy(info->fix.id, "nouveaufb");
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ info->fbops = &nouveau_fbcon_ops;
+ info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
+ dev_priv->vm_vram_base;
+ info->fix.smem_len = size;
+
+ info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+ info->screen_size = size;
+
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+
+ /* FIXME: we really shouldn't expose mmio space at all */
+ info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
+ info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+
+ /* Set aperture base/size for vesafb takeover */
+#if defined(__i386__) || defined(__x86_64__)
+ if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
+ /* Some NVIDIA VBIOSes are stupid and decide to put the
+ * framebuffer in the middle of the PRAMIN BAR for
+ * whatever reason. We need to know the exact lfb_base
+ * to get vesafb kicked off, and the only reliable way
+ * we have left is to find out lfb_base the same way
+ * vesafb did.
+ */
+ info->aperture_base = screen_info.lfb_base;
+ info->aperture_size = screen_info.lfb_size;
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
+ info->aperture_size *= 65536;
+ } else
+#endif
+ {
+ info->aperture_base = info->fix.mmio_start;
+ info->aperture_size = info->fix.mmio_len;
+ }
+
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+
+ fb->fbdev = info;
+
+ par->nouveau_fb = nouveau_fb;
+ par->dev = dev;
+
+ switch (dev_priv->card_type) {
+ case NV_50:
+ nv50_fbcon_accel_init(info);
+ break;
+ default:
+ nv04_fbcon_accel_init(info);
+ break;
+ }
+
+ nouveau_fbcon_zfill(dev);
+
+ /* To allow resizing without swapping buffers */
+ NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
+ nouveau_fb->base.width,
+ nouveau_fb->base.height,
+ nvbo->bo.offset, nvbo);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+
+out_unref:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
+
+int
+nouveau_fbcon_probe(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+}
+
+int
+nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+ struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct fb_info *info;
+
+ if (!fb)
+ return -EINVAL;
+
+ info = fb->fbdev;
+ if (info) {
+ struct nouveau_fbcon_par *par = info->par;
+
+ unregister_framebuffer(info);
+ nouveau_bo_unmap(nouveau_fb->nvbo);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+ nouveau_fb->nvbo = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ if (par)
+ drm_fb_helper_free(&par->helper);
+ framebuffer_release(info);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 00000000000..8531140fedb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FBCON_H__
+#define __NOUVEAU_FBCON_H__
+
+#include "drm_fb_helper.h"
+
+struct nouveau_fbcon_par {
+ struct drm_fb_helper helper;
+ struct drm_device *dev;
+ struct nouveau_framebuffer *nouveau_fb;
+};
+
+int nouveau_fbcon_probe(struct drm_device *dev);
+int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+void nouveau_fbcon_restore(void);
+void nouveau_fbcon_zfill(struct drm_device *dev);
+
+int nv04_fbcon_accel_init(struct fb_info *info);
+int nv50_fbcon_accel_init(struct fb_info *info);
+
+#endif /* __NOUVEAU_FBCON_H__ */
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 00000000000..0cff7eb3690
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+#define USE_REFCNT (dev_priv->card_type >= NV_10)
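+/* NV10 and later expose a per-channel reference counter (read via
+ * nvchan_rd32(chan, 0x48) in nouveau_fence_update()); older chips fall
+ * back to the sequence last recorded by the fence interrupt handler. */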
+
+struct nouveau_fence {
+ struct nouveau_channel *channel;
+ struct kref refcount;
+ struct list_head entry;
+
+ uint32_t sequence;
+ bool signalled;
+};
+
+static inline struct nouveau_fence *
+nouveau_fence(void *sync_obj)
+{
+ return (struct nouveau_fence *)sync_obj;
+}
+
+static void
+nouveau_fence_del(struct kref *ref)
+{
+ struct nouveau_fence *fence =
+ container_of(ref, struct nouveau_fence, refcount);
+
+ kfree(fence);
+}
+
+void
+nouveau_fence_update(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct list_head *entry, *tmp;
+ struct nouveau_fence *fence;
+ uint32_t sequence;
+
+ if (USE_REFCNT)
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+ sequence = chan->fence.last_sequence_irq;
+
+ if (chan->fence.sequence_ack == sequence)
+ return;
+ chan->fence.sequence_ack = sequence;
+
+ list_for_each_safe(entry, tmp, &chan->fence.pending) {
+ fence = list_entry(entry, struct nouveau_fence, entry);
+
+ sequence = fence->sequence;
+ fence->signalled = true;
+ list_del(&fence->entry);
+ kref_put(&fence->refcount, nouveau_fence_del);
+
+ if (sequence == chan->fence.sequence_ack)
+ break;
+ }
+}
+
+int
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
+ bool emit)
+{
+ struct nouveau_fence *fence;
+ int ret = 0;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ kref_init(&fence->refcount);
+ fence->channel = chan;
+
+ if (emit)
+ ret = nouveau_fence_emit(fence);
+
+ if (ret)
+ nouveau_fence_unref((void *)&fence);
+ *pfence = fence;
+ return ret;
+}
+
+struct nouveau_channel *
+nouveau_fence_channel(struct nouveau_fence *fence)
+{
+ return fence ? fence->channel : NULL;
+}
+
+int
+nouveau_fence_emit(struct nouveau_fence *fence)
+{
+ struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
+ struct nouveau_channel *chan = fence->channel;
+ unsigned long flags;
+ int ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
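+ /* If the next sequence number would collide with the last
+ * acknowledged one, the 32-bit sequence space is entirely occupied
+ * by unsignalled fences; try to reap some before giving up. */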
+ if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ nouveau_fence_update(chan);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+
+ BUG_ON(chan->fence.sequence ==
+ chan->fence.sequence_ack - 1);
+ }
+
+ fence->sequence = ++chan->fence.sequence;
+
+ kref_get(&fence->refcount);
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ list_add_tail(&fence->entry, &chan->fence.pending);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+
+ BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ OUT_RING(chan, fence->sequence);
+ FIRE_RING(chan);
+
+ return 0;
+}
+
+void
+nouveau_fence_unref(void **sync_obj)
+{
+ struct nouveau_fence *fence = nouveau_fence(*sync_obj);
+
+ if (fence)
+ kref_put(&fence->refcount, nouveau_fence_del);
+ *sync_obj = NULL;
+}
+
+void *
+nouveau_fence_ref(void *sync_obj)
+{
+ struct nouveau_fence *fence = nouveau_fence(sync_obj);
+
+ kref_get(&fence->refcount);
+ return sync_obj;
+}
+
+bool
+nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+{
+ struct nouveau_fence *fence = nouveau_fence(sync_obj);
+ struct nouveau_channel *chan = fence->channel;
+ unsigned long flags;
+
+ if (fence->signalled)
+ return true;
+
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ nouveau_fence_update(chan);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+ return fence->signalled;
+}
+
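+/* Wait up to 3 seconds for a fence to signal: 'lazy' yields the CPU
+ * between polls, 'intr' lets pending signals interrupt the wait. */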
+int
+nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+ unsigned long timeout = jiffies + (3 * DRM_HZ);
+ int ret = 0;
+
+ __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+
+ while (1) {
+ if (nouveau_fence_signalled(sync_obj, sync_arg))
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ if (lazy)
+ schedule_timeout(1);
+
+ if (intr && signal_pending(current)) {
+ ret = -ERESTART;
+ break;
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+}
+
+int
+nouveau_fence_flush(void *sync_obj, void *sync_arg)
+{
+ return 0;
+}
+
+void
+nouveau_fence_handler(struct drm_device *dev, int channel)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+
+ if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
+ chan = dev_priv->fifos[channel];
+
+ if (chan) {
+ spin_lock_irq(&chan->fence.lock);
+ nouveau_fence_update(chan);
+ spin_unlock_irq(&chan->fence.lock);
+ }
+}
+
+int
+nouveau_fence_init(struct nouveau_channel *chan)
+{
+ INIT_LIST_HEAD(&chan->fence.pending);
+ spin_lock_init(&chan->fence.lock);
+ return 0;
+}
+
+void
+nouveau_fence_fini(struct nouveau_channel *chan)
+{
+ struct list_head *entry, *tmp;
+ struct nouveau_fence *fence;
+
+ list_for_each_safe(entry, tmp, &chan->fence.pending) {
+ fence = list_entry(entry, struct nouveau_fence, entry);
+
+ fence->signalled = true;
+ list_del(&fence->entry);
+ kref_put(&fence->refcount, nouveau_fence_del);
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 00000000000..11f831f0ddc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,992 @@
+/*
+ * Copyright (C) 2008 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+#define nouveau_gem_pushbuf_sync(chan) 0
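+/* Debug stub: making this non-zero would cause every submission to wait
+ * on its fence and dump the push buffer on failure (see
+ * nouveau_gem_ioctl_pushbuf() below). */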
+
+int
+nouveau_gem_object_new(struct drm_gem_object *gem)
+{
+ return 0;
+}
+
+void
+nouveau_gem_object_del(struct drm_gem_object *gem)
+{
+ struct nouveau_bo *nvbo = gem->driver_private;
+ struct ttm_buffer_object *bo;
+
+ if (!nvbo)
+ return;
+ bo = &nvbo->bo;
+ nvbo->gem = NULL;
+
+ if (unlikely(nvbo->cpu_filp))
+ ttm_bo_synccpu_write_release(bo);
+
+ if (unlikely(nvbo->pin_refcnt)) {
+ nvbo->pin_refcnt = 1;
+ nouveau_bo_unpin(nvbo);
+ }
+
+ ttm_bo_unref(&bo);
+}
+
+int
+nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, bool no_vm, bool mappable,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
+ tile_flags, no_vm, mappable, pnvbo);
+ if (ret)
+ return ret;
+ nvbo = *pnvbo;
+
+ nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+ if (!nvbo->gem) {
+ nouveau_bo_ref(NULL, pnvbo);
+ return -ENOMEM;
+ }
+
+ nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
+ nvbo->gem->driver_private = nvbo;
+ return 0;
+}
+
+static int
+nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ rep->domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+ rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ rep->offset = nvbo->bo.offset;
+ rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+ rep->tile_mode = nvbo->tile_mode;
+ rep->tile_flags = nvbo->tile_flags;
+ return 0;
+}
+
+static bool
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
+{
+ switch (tile_flags) {
+ case 0x0000:
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7000:
+ case 0x7400:
+ case 0x7a00:
+ case 0xe000:
+ break;
+ default:
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+ return false;
+ }
+
+ return true;
+}
+
+int
+nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_gem_new *req = data;
+ struct nouveau_bo *nvbo = NULL;
+ struct nouveau_channel *chan = NULL;
+ uint32_t flags = 0;
+ int ret = 0;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
+ dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
+
+ if (req->channel_hint) {
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
+ file_priv, chan);
+ }
+
+ if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+ if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
+ flags |= TTM_PL_FLAG_SYSTEM;
+
+ if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+ return -EINVAL;
+
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
+ req->info.tile_mode, req->info.tile_flags, false,
+ (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
+ &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gem_info(nvbo->gem, &req->info);
+ if (ret)
+ goto out;
+
+ ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(nvbo->gem);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ drm_gem_object_unreference(nvbo->gem);
+ return ret;
+}
+
+static int
+nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
+ uint32_t write_domains, uint32_t valid_domains)
+{
+ struct nouveau_bo *nvbo = gem->driver_private;
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ uint64_t flags;
+
+ if (!valid_domains || (!read_domains && !write_domains))
+ return -EINVAL;
+
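+ /* Prefer VRAM for writes; for reads, keep the bo where it already
+ * resides whenever that domain is acceptable, e.g. (illustrative)
+ * a bo resident in GART with GART in read_domains stays put rather
+ * than migrating to VRAM. */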
+ if (write_domains) {
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ (write_domains & NOUVEAU_GEM_DOMAIN_GART))
+ flags = TTM_PL_FLAG_TT;
+ else
+ return -EINVAL;
+ } else {
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ bo->mem.mem_type == TTM_PL_VRAM)
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ bo->mem.mem_type == TTM_PL_TT)
+ flags = TTM_PL_FLAG_TT;
+ else
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ flags = TTM_PL_FLAG_TT;
+ }
+
+ nouveau_bo_placement_set(nvbo, flags);
+ return 0;
+}
+
+struct validate_op {
+ struct nouveau_fence *fence;
+ struct list_head vram_list;
+ struct list_head gart_list;
+ struct list_head both_list;
+};
+
+static void
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+{
+ struct list_head *entry, *tmp;
+ struct nouveau_bo *nvbo;
+
+ list_for_each_safe(entry, tmp, list) {
+ nvbo = list_entry(entry, struct nouveau_bo, entry);
+ if (likely(fence)) {
+ struct nouveau_fence *prev_fence;
+
+ spin_lock(&nvbo->bo.lock);
+ prev_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = nouveau_fence_ref(fence);
+ spin_unlock(&nvbo->bo.lock);
+ nouveau_fence_unref((void *)&prev_fence);
+ }
+
+ list_del(&nvbo->entry);
+ nvbo->reserved_by = NULL;
+ ttm_bo_unreserve(&nvbo->bo);
+ drm_gem_object_unreference(nvbo->gem);
+ }
+}
+
+static void
+validate_fini(struct validate_op *op, bool success)
+{
+ struct nouveau_fence *fence = op->fence;
+
+ if (unlikely(!success))
+ op->fence = NULL;
+
+ validate_fini_list(&op->vram_list, op->fence);
+ validate_fini_list(&op->gart_list, op->fence);
+ validate_fini_list(&op->both_list, op->fence);
+ nouveau_fence_unref((void *)&fence);
+}
+
+static int
+validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ int nr_buffers, struct validate_op *op)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t sequence;
+ int trycnt = 0;
+ int ret, i;
+
+ sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+retry:
+ if (++trycnt > 100000) {
+ NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nr_buffers; i++) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+
+ gem = drm_gem_object_lookup(dev, file_priv, b->handle);
+ if (!gem) {
+ NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+ validate_fini(op, NULL);
+ return -EINVAL;
+ }
+ nvbo = gem->driver_private;
+
+ if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
+ NV_ERROR(dev, "multiple instances of buffer %d on "
+ "validation list\n", b->handle);
+ validate_fini(op, NULL);
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+ if (ret) {
+ validate_fini(op, NULL);
+ if (ret == -EAGAIN)
+ ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+ drm_gem_object_unreference(gem);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+
+ nvbo->reserved_by = file_priv;
+ nvbo->pbbo_index = i;
+ if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
+ list_add_tail(&nvbo->entry, &op->both_list);
+ else
+ if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+ list_add_tail(&nvbo->entry, &op->vram_list);
+ else
+ if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+ list_add_tail(&nvbo->entry, &op->gart_list);
+ else {
+ NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+ b->valid_domains);
+ validate_fini(op, NULL);
+ return -EINVAL;
+ }
+
+ if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
+ validate_fini(op, NULL);
+
+ if (nvbo->cpu_filp == file_priv) {
+ NV_ERROR(dev, "bo %p mapped by process trying "
+ "to validate it!\n", nvbo);
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+ if (ret == -ERESTART)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+
+ return 0;
+}
+
+static int
+validate_list(struct nouveau_channel *chan, struct list_head *list,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+{
+ struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+ (void __force __user *)(uintptr_t)user_pbbo_ptr;
+ struct nouveau_bo *nvbo;
+ int ret, relocs = 0;
+
+ list_for_each_entry(nvbo, list, entry) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+
+ if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+ b->write_domains,
+ b->valid_domains);
+ if (unlikely(ret))
+ return ret;
+
+ nvbo->channel = chan;
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ false, false);
+ nvbo->channel = NULL;
+ if (unlikely(ret))
+ return ret;
+
+ if (nvbo->bo.offset == b->presumed_offset &&
+ ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+ continue;
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed_offset = nvbo->bo.offset;
+ b->presumed_ok = 0;
+ relocs++;
+
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+ return -EFAULT;
+ }
+
+ return relocs;
+}
+
+static int
+nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
+ struct drm_file *file_priv,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ uint64_t user_buffers, int nr_buffers,
+ struct validate_op *op, int *apply_relocs)
+{
+ int ret, relocs = 0;
+
+ INIT_LIST_HEAD(&op->vram_list);
+ INIT_LIST_HEAD(&op->gart_list);
+ INIT_LIST_HEAD(&op->both_list);
+
+ ret = nouveau_fence_new(chan, &op->fence, false);
+ if (ret)
+ return ret;
+
+ if (nr_buffers == 0)
+ return 0;
+
+ ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
+ if (unlikely(ret))
+ return ret;
+
+ ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+ if (unlikely(ret < 0)) {
+ validate_fini(op, NULL);
+ return ret;
+ }
+ relocs += ret;
+
+ ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+ if (unlikely(ret < 0)) {
+ validate_fini(op, NULL);
+ return ret;
+ }
+ relocs += ret;
+
+ ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+ if (unlikely(ret < 0)) {
+ validate_fini(op, NULL);
+ return ret;
+ }
+ relocs += ret;
+
+ *apply_relocs = relocs;
+ return 0;
+}
+
+static inline void *
+u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
+{
+ void *mem;
+ void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+ mem = kmalloc(nmemb * size, GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
+ kfree(mem);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return mem;
+}
+
+static int
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
+ struct drm_nouveau_gem_pushbuf_bo *bo,
+ int nr_relocs, uint64_t ptr_relocs,
+ int nr_dwords, int first_dword,
+ uint32_t *pushbuf, bool is_iomem)
+{
+ struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
+ struct drm_device *dev = chan->dev;
+ int ret = 0, i;
+
+ reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+ if (IS_ERR(reloc))
+ return PTR_ERR(reloc);
+
+ for (i = 0; i < nr_relocs; i++) {
+ struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
+ struct drm_nouveau_gem_pushbuf_bo *b;
+ uint32_t data;
+
+ if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
+ r->reloc_index >= first_dword + nr_dwords) {
+ NV_ERROR(dev, "Bad relocation %d\n", i);
+ NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
+ NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+ ret = -EINVAL;
+ break;
+ }
+
+ b = &bo[r->bo_index];
+ if (b->presumed_ok)
+ continue;
+
+ if (r->flags & NOUVEAU_GEM_RELOC_LOW)
+ data = b->presumed_offset + r->data;
+ else
+ if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
+ data = (b->presumed_offset + r->data) >> 32;
+ else
+ data = r->data;
+
+ if (r->flags & NOUVEAU_GEM_RELOC_OR) {
+ if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+ data |= r->tor;
+ else
+ data |= r->vor;
+ }
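+ /* Worked example (illustrative values): a RELOC_LOW entry against
+ * a bo whose presumed_offset is 0x00123000 with r->data 0x10
+ * patches 0x00123010 into the push buffer; a RELOC_HIGH entry on
+ * the same bo patches in the upper 32 bits, here 0. */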
+
+ if (is_iomem)
+ iowrite32_native(data, (void __force __iomem *)
+ &pushbuf[r->reloc_index]);
+ else
+ pushbuf[r->reloc_index] = data;
+ }
+
+ kfree(reloc);
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_pushbuf *req = data;
+ struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct nouveau_channel *chan;
+ struct validate_op op;
+ uint32_t *pushbuf = NULL;
+ int ret = 0, do_reloc = 0, i;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+
+ if (req->nr_dwords >= chan->dma.max ||
+ req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
+ req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
+ NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
+ NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
+ chan->dma.max - 1);
+ NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
+ NOUVEAU_GEM_MAX_BUFFERS);
+ NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
+ NOUVEAU_GEM_MAX_RELOCS);
+ return -EINVAL;
+ }
+
+ pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
+ if (IS_ERR(pushbuf))
+ return PTR_ERR(pushbuf);
+
+ bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+ if (IS_ERR(bo)) {
+ kfree(pushbuf);
+ return PTR_ERR(bo);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Validate buffer list */
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+ req->nr_buffers, &op, &do_reloc);
+ if (ret)
+ goto out;
+
+ /* Apply any relocations that are required */
+ if (do_reloc) {
+ ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
+ bo, req->nr_relocs,
+ req->relocs,
+ req->nr_dwords, 0,
+ pushbuf, false);
+ if (ret)
+ goto out;
+ }
+
+ /* Emit push buffer to the hw
+ */
+ ret = RING_SPACE(chan, req->nr_dwords);
+ if (ret)
+ goto out;
+
+ OUT_RINGp(chan, pushbuf, req->nr_dwords);
+
+ ret = nouveau_fence_emit(op.fence);
+ if (ret) {
+ NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+ WIND_RING(chan);
+ goto out;
+ }
+
+ if (nouveau_gem_pushbuf_sync(chan)) {
+ ret = nouveau_fence_wait(op.fence, NULL, false, false);
+ if (ret) {
+ for (i = 0; i < req->nr_dwords; i++)
+ NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
+ NV_ERROR(dev, "^^ above push buffer is fail :(\n");
+ }
+ }
+
+out:
+ validate_fini(&op, ret == 0);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(pushbuf);
+ kfree(bo);
+ return ret;
+}
+
+#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
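+/* NV20 and later can CALL into a user push buffer and have the GPU
+ * return on its own; older chips must JMP into it and rely on a jump
+ * back being patched into the buffer (the !PUSHBUF_CAL paths below). */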
+
+int
+nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_gem_pushbuf_call *req = data;
+ struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct nouveau_channel *chan;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *pbbo;
+ struct validate_op op;
+ int i, ret = 0, do_reloc = 0;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+
+ if (unlikely(req->handle == 0))
+ goto out_next;
+
+ if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
+ req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
+ NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
+ NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
+ NOUVEAU_GEM_MAX_BUFFERS);
+ NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
+ NOUVEAU_GEM_MAX_RELOCS);
+ return -EINVAL;
+ }
+
+ bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Validate buffer list */
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+ req->nr_buffers, &op, &do_reloc);
+ if (ret) {
+ NV_ERROR(dev, "validate: %d\n", ret);
+ goto out;
+ }
+
+ /* Validate DMA push buffer */
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem) {
+ NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
+ ret = -EINVAL;
+ goto out;
+ }
+ pbbo = nouveau_gem_object(gem);
+
+ ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
+ chan->fence.sequence);
+ if (ret) {
+ NV_ERROR(dev, "resv pb: %d\n", ret);
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
+ nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
+ ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
+ if (ret) {
+ NV_ERROR(dev, "validate pb: %d\n", ret);
+ ttm_bo_unreserve(&pbbo->bo);
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
+ list_add_tail(&pbbo->entry, &op.both_list);
+
+ /* If presumed return address doesn't match, we need to map the
+ * push buffer and fix it up.
+ */
+ if (!PUSHBUF_CAL) {
+ uint32_t retaddy;
+
+ if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
+ ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
+ if (ret) {
+ NV_ERROR(dev, "jmp_space: %d\n", ret);
+ goto out;
+ }
+ }
+
+ retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+ retaddy |= 0x20000000;
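+ /* e.g. with pushbuf_base 0x10000 and dma.cur 0x40 (illustrative
+ * values) the return jump becomes 0x20010108: the low bits hold
+ * the byte offset to return to, bit 29 marks a jump command. */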
+ if (retaddy != req->suffix0) {
+ req->suffix0 = retaddy;
+ do_reloc = 1;
+ }
+ }
+
+ /* Apply any relocations that are required */
+ if (do_reloc) {
+ void *pbvirt;
+ bool is_iomem;
+ ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
+ &pbbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "kmap pb: %d\n", ret);
+ goto out;
+ }
+
+ pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
+ ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
+ req->nr_relocs,
+ req->relocs,
+ req->nr_dwords,
+ req->offset / 4,
+ pbvirt, is_iomem);
+
+ if (!PUSHBUF_CAL) {
+ nouveau_bo_wr32(pbbo,
+ req->offset / 4 + req->nr_dwords - 2,
+ req->suffix0);
+ }
+
+ ttm_bo_kunmap(&pbbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "reloc apply: %d\n", ret);
+ goto out;
+ }
+ }
+
+ if (PUSHBUF_CAL) {
+ ret = RING_SPACE(chan, 2);
+ if (ret) {
+ NV_ERROR(dev, "cal_space: %d\n", ret);
+ goto out;
+ }
+ OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
+ req->offset) | 2);
+ OUT_RING(chan, 0);
+ } else {
+ ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+ if (ret) {
+ NV_ERROR(dev, "jmp_space: %d\n", ret);
+ goto out;
+ }
+ OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
+ req->offset) | 0x20000000);
+ OUT_RING(chan, 0);
+
+ /* Space the jumps apart with NOPs. */
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(chan, 0);
+ }
+
+ ret = nouveau_fence_emit(op.fence);
+ if (ret) {
+ NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+ WIND_RING(chan);
+ goto out;
+ }
+
+out:
+ validate_fini(&op, ret == 0);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(bo);
+
+out_next:
+ if (PUSHBUF_CAL) {
+ req->suffix0 = 0x00020000;
+ req->suffix1 = 0x00000000;
+ } else {
+ req->suffix0 = 0x20000000 |
+ (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
+ req->suffix1 = 0x00000000;
+ }
+
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_gem_pushbuf_call *req = data;
+
+ req->vram_available = dev_priv->fb_aper_free;
+ req->gart_available = dev_priv->gart_info.aper_free;
+
+ return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
+}
+
+static inline uint32_t
+domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
+{
+ uint32_t flags = 0;
+
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+
+ return flags;
+}
+
+int
+nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_pin *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ int ret = 0;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
+ return -EINVAL;
+ }
+
+ if (!DRM_SUSER(DRM_CURPROC))
+ return -EPERM;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -EINVAL;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
+ if (ret)
+ goto out;
+
+ req->offset = nvbo->bo.offset;
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ req->domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_pin *req = data;
+ struct drm_gem_object *gem;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -EINVAL;
+
+ ret = nouveau_bo_unpin(nouveau_gem_object(gem));
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_cpu_prep *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
+ int ret = -EINVAL;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return ret;
+ nvbo = nouveau_gem_object(gem);
+
+ if (nvbo->cpu_filp) {
+ if (nvbo->cpu_filp == file_priv)
+ goto out;
+
+ ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
+ if (ret == -ERESTART)
+ ret = -EAGAIN;
+ if (ret)
+ goto out;
+ }
+
+ if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+ ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+ } else {
+ ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
+ if (ret == -ERESTART)
+ ret = -EAGAIN;
+ else
+ if (ret == 0)
+ nvbo->cpu_filp = file_priv;
+ }
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_cpu_prep *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ int ret = -EINVAL;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return ret;
+ nvbo = nouveau_gem_object(gem);
+
+ if (nvbo->cpu_filp != file_priv)
+ goto out;
+ nvbo->cpu_filp = NULL;
+
+ ttm_bo_synccpu_write_release(&nvbo->bo);
+ ret = 0;
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_info *req = data;
+ struct drm_gem_object *gem;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -EINVAL;
+
+ ret = nouveau_gem_info(gem, req);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
new file mode 100644
index 00000000000..dc46792a5c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+#define CHIPSET_NFORCE 0x01a0
+#define CHIPSET_NFORCE2 0x01f0
+
+/*
+ * misc hw access wrappers/control functions
+ */
+
+void
+NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+ NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
+}
+
+uint8_t
+NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+ return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
+}
+
+void
+NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+ NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
+}
+
+uint8_t
+NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+ return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
+}
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
+ * it affects only the 8 bit vga io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
+ * in general, the set value of cr44 does not matter: reg access works as
+ * expected and values can be set for the appropriate head by using a 0x2000
+ * offset as required
+ * however:
+ * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
+ * cr44 must be set to 0 or 3 for accessing values on the correct head
+ * through the common 0xc03c* addresses
+ * b) in tied mode (4) head B is programmed to the values set on head A, and
+ * access using the head B addresses can have strange results, ergo we leave
+ * tied mode in init once we know to what cr44 should be restored on exit
+ *
+ * the owner parameter is slightly abused:
+ * 0 and 1 are treated as head values and so the set value is (owner * 3)
+ * other values are treated as literal values to set
+ */
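+/* For example, NVSetOwner(dev, 0) writes 0 (head A) to CR44,
+ * NVSetOwner(dev, 1) writes 3 (head B), and NVSetOwner(dev, 4) writes
+ * the literal tied-heads value 4. */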
+void
+NVSetOwner(struct drm_device *dev, int owner)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (owner == 1)
+ owner *= 3;
+
+ if (dev_priv->chipset == 0x11) {
+ /* This might seem stupid, but the blob does it and
+ * omitting it often locks the system up.
+ */
+ NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+ NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
+ }
+
+ /* CR44 is always changed on CRTC0 */
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
+
+ if (dev_priv->chipset == 0x11) { /* set me harder */
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+ }
+}
+
+void
+NVBlankScreen(struct drm_device *dev, int head, bool blank)
+{
+ unsigned char seq1;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, head);
+
+ seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+ NVVgaSeqReset(dev, head, true);
+ if (blank)
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+ else
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
+ NVVgaSeqReset(dev, head, false);
+}
+
+/*
+ * PLL setting
+ */
+
+static int
+powerctrl_1_shift(int chip_version, int reg)
+{
+ int shift = -4;
+
+ if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
+ return shift;
+
+ switch (reg) {
+ case NV_RAMDAC_VPLL2:
+ shift += 4;
+ /* fall through */
+ case NV_PRAMDAC_VPLL_COEFF:
+ shift += 4;
+ /* fall through */
+ case NV_PRAMDAC_MPLL_COEFF:
+ shift += 4;
+ /* fall through */
+ case NV_PRAMDAC_NVPLL_COEFF:
+ shift += 4;
+ }
+
+ /*
+ * the shift for vpll regs is only used for nv3x chips with a single
+ * stage pll
+ */
+ if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
+ chip_version == 0x36 || chip_version >= 0x40))
+ shift = -4;
+
+ return shift;
+}
+
+static void
+setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chip_version = dev_priv->vbios->chip_version;
+ uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
+ int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
+ uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+ uint32_t saved_powerctrl_1 = 0;
+ int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
+
+ if (oldpll == pll)
+ return; /* already set */
+
+ if (shift_powerctrl_1 >= 0) {
+ saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
+ (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+ 1 << shift_powerctrl_1);
+ }
+
+ if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
+ /* upclock -- write new post divider first */
+ NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
+ else
+ /* downclock -- write new NM first */
+ NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
+
+ if (chip_version < 0x17 && chip_version != 0x11)
+ /* wait a bit on older chips */
+ msleep(64);
+ NVReadRAMDAC(dev, 0, reg);
+
+ /* then write the other half as well */
+ NVWriteRAMDAC(dev, 0, reg, pll);
+
+ if (shift_powerctrl_1 >= 0)
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
+}
+
+static uint32_t
+new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
+{
+ bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
+
+ if (ss) /* single stage pll mode */
+ ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
+ NV_RAMDAC_580_VPLL2_ACTIVE;
+ else
+ ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
+ ~NV_RAMDAC_580_VPLL2_ACTIVE;
+
+ return ramdac580;
+}
+
+static void
+setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
+ struct nouveau_pll_vals *pv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chip_version = dev_priv->vbios->chip_version;
+ bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
+ uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
+ uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
+ uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
+ uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+ uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
+ uint32_t oldramdac580 = 0, ramdac580 = 0;
+ bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
+ uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
+ int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
+
+ /* model specific additions to generic pll1 and pll2 set up above */
+ if (nv3035) {
+ pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
+ (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
+ pll2 = 0;
+ }
+ if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
+ oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+ ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
+ if (oldramdac580 != ramdac580)
+ oldpll1 = ~0; /* force mismatch */
+ if (single_stage)
+ /* magic value used by nvidia in single stage mode */
+ pll2 |= 0x011f;
+ }
+ if (chip_version > 0x70)
+ /* magic bits set by the blob (but not the bios) on g71-73 */
+ pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
+
+ if (oldpll1 == pll1 && oldpll2 == pll2)
+ return; /* already set */
+
+ if (shift_powerctrl_1 >= 0) {
+ saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
+ (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+ 1 << shift_powerctrl_1);
+ }
+
+ if (chip_version >= 0x40) {
+ int shift_c040 = 14;
+
+ switch (reg1) {
+ case NV_PRAMDAC_MPLL_COEFF:
+ shift_c040 += 2;
+ /* fall through */
+ case NV_PRAMDAC_NVPLL_COEFF:
+ shift_c040 += 2;
+ /* fall through */
+ case NV_RAMDAC_VPLL2:
+ shift_c040 += 2;
+ /* fall through */
+ case NV_PRAMDAC_VPLL_COEFF:
+ shift_c040 += 2;
+ }
+
+ savedc040 = nvReadMC(dev, 0xc040);
+ if (shift_c040 != 14)
+ nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
+ }
+
+ if (oldramdac580 != ramdac580)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
+
+ if (!nv3035)
+ NVWriteRAMDAC(dev, 0, reg2, pll2);
+ NVWriteRAMDAC(dev, 0, reg1, pll1);
+
+ if (shift_powerctrl_1 >= 0)
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
+ if (chip_version >= 0x40)
+ nvWriteMC(dev, 0xc040, savedc040);
+}
+
+static void
+setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
+ struct nouveau_pll_vals *pv)
+{
+ /* When setting PLLs, there is a merry game of disabling and enabling
+ * various bits of hardware during the process. This function is a
+ * synthesis of six nv4x traces, with nearly every card doing a subtly
+ * different thing. With luck all the necessary bits for each card are
+ * combined herein. Without luck it deviates from each card's formula
+ * so as to not work on any :)
+ */
+
+ uint32_t Preg = NMNMreg - 4;
+ bool mpll = Preg == 0x4020;
+ uint32_t oldPval = nvReadMC(dev, Preg);
+ uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
+ uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+ 0xc << 28 | pv->log2P << 16;
+ uint32_t saved4600 = 0;
+ /* some cards have different maskc040s */
+ uint32_t maskc040 = ~(3 << 14), savedc040;
+ bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
+
+ if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+ return;
+
+ if (Preg == 0x4000)
+ maskc040 = ~0x333;
+ if (Preg == 0x4058)
+ maskc040 = ~(0xc << 24);
+
+ if (mpll) {
+ struct pll_lims pll_lim;
+ uint8_t Pval2;
+
+ if (get_pll_limits(dev, Preg, &pll_lim))
+ return;
+
+ Pval2 = pv->log2P + pll_lim.log2p_bias;
+ if (Pval2 > pll_lim.max_log2p)
+ Pval2 = pll_lim.max_log2p;
+ Pval |= 1 << 28 | Pval2 << 20;
+
+ saved4600 = nvReadMC(dev, 0x4600);
+ nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
+ }
+ if (single_stage)
+ Pval |= mpll ? 1 << 12 : 1 << 8;
+
+ nvWriteMC(dev, Preg, oldPval | 1 << 28);
+ nvWriteMC(dev, Preg, Pval & ~(4 << 28));
+ if (mpll) {
+ Pval |= 8 << 20;
+ nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
+ nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
+ }
+
+ savedc040 = nvReadMC(dev, 0xc040);
+ nvWriteMC(dev, 0xc040, savedc040 & maskc040);
+
+ nvWriteMC(dev, NMNMreg, NMNM);
+ if (NMNMreg == 0x4024)
+ nvWriteMC(dev, 0x403c, NMNM);
+
+ nvWriteMC(dev, Preg, Pval);
+ if (mpll) {
+ Pval &= ~(8 << 20);
+ nvWriteMC(dev, 0x4020, Pval);
+ nvWriteMC(dev, 0x4038, Pval);
+ nvWriteMC(dev, 0x4600, saved4600);
+ }
+
+ nvWriteMC(dev, 0xc040, savedc040);
+
+ if (mpll) {
+ nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
+ nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
+ }
+}
+
+void
+nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
+ struct nouveau_pll_vals *pv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int cv = dev_priv->vbios->chip_version;
+
+ if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+ cv >= 0x40) {
+ if (reg1 > 0x405c)
+ setPLL_double_highregs(dev, reg1, pv);
+ else
+ setPLL_double_lowregs(dev, reg1, pv);
+ } else
+ setPLL_single(dev, reg1, pv);
+}
+
+/*
+ * PLL getting
+ */
+
+static void
+nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
+ uint32_t pll2, struct nouveau_pll_vals *pllvals)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
+
+ /* log2P is masked with 0x7 as it is never more than 7; nv30/35 only uses 3 bits */
+ pllvals->log2P = (pll1 >> 16) & 0x7;
+ pllvals->N2 = pllvals->M2 = 1;
+
+ if (reg1 <= 0x405c) {
+ pllvals->NM1 = pll2 & 0xffff;
+ /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
+ if (!(pll1 & 0x1100))
+ pllvals->NM2 = pll2 >> 16;
+ } else {
+ pllvals->NM1 = pll1 & 0xffff;
+ if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
+ pllvals->NM2 = pll2 & 0xffff;
+ else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
+ pllvals->M1 &= 0xf; /* only 4 bits */
+ if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
+ pllvals->M2 = (pll1 >> 4) & 0x7;
+ pllvals->N2 = ((pll1 >> 21) & 0x18) |
+ ((pll1 >> 19) & 0x7);
+ }
+ }
+ }
+}
+
+int
+nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
+ struct nouveau_pll_vals *pllvals)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
+ NV_PRAMDAC_MPLL_COEFF,
+ NV_PRAMDAC_VPLL_COEFF,
+ NV_RAMDAC_VPLL2 };
+ const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
+ 0x4020,
+ NV_PRAMDAC_VPLL_COEFF,
+ NV_RAMDAC_VPLL2 };
+ uint32_t reg1, pll1, pll2 = 0;
+ struct pll_lims pll_lim;
+ int ret;
+
+ if (dev_priv->card_type < NV_40)
+ reg1 = nv04_regs[plltype];
+ else
+ reg1 = nv40_regs[plltype];
+
+ pll1 = nvReadMC(dev, reg1);
+
+ if (reg1 <= 0x405c)
+ pll2 = nvReadMC(dev, reg1 + 4);
+ else if (nv_two_reg_pll(dev)) {
+ uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
+
+ pll2 = nvReadMC(dev, reg2);
+ }
+
+ if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+ uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+
+ /* check whether vpll has been forced into single stage mode */
+ if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
+ if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
+ pll2 = 0;
+ } else
+ if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
+ pll2 = 0;
+ }
+
+ nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
+
+ ret = get_pll_limits(dev, plltype, &pll_lim);
+ if (ret)
+ return ret;
+
+ pllvals->refclk = pll_lim.refclk;
+
+ return 0;
+}
+
+int
+nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
+{
+ /* Avoid divide by zero if called at an inappropriate time */
+ if (!pv->M1 || !pv->M2)
+ return 0;
+
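+ /* e.g. refclk = 27000 kHz, N1 = 200, M1 = 13, N2 = M2 = 1, log2P = 1
+ * (illustrative values): 200 * 27000 / 13 = 415384 kHz, halved by
+ * the post divider to 207692 kHz. */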
+ return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
+}
+
+int
+nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
+{
+ struct nouveau_pll_vals pllvals;
+
+ if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+ uint32_t mpllP;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+ if (!mpllP)
+ mpllP = 4;
+
+ return 400000 / mpllP;
+ } else
+ if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+ uint32_t clock;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+ return clock;
+ }
+
+ nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+
+ return nouveau_hw_pllvals_to_clk(&pllvals);
+}
+
+static void
+nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
+{
+ /* the vpll on an unused head can come up with a random value, way
+ * beyond the pll limits. for some reason this causes the chip to
+ * lock up when reading the dac palette regs, so set a valid pll here
+ * when such a condition is detected. only seen on nv11 to date
+ */
+
+ struct pll_lims pll_lim;
+ struct nouveau_pll_vals pv;
+ uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+
+ if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+ return;
+ nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+
+ if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
+ pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
+ pv.log2P <= pll_lim.max_log2p)
+ return;
+
+ NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
+
+ /* set lowest clock within static limits */
+ pv.M1 = pll_lim.vco1.max_m;
+ pv.N1 = pll_lim.vco1.min_n;
+ pv.log2P = pll_lim.max_usable_log2p;
+ nouveau_hw_setpll(dev, pllreg, &pv);
+}
+
+/*
+ * vga font save/restore
+ */
+
+static void nouveau_vga_font_io(struct drm_device *dev,
+ void __iomem *iovram,
+ bool save, unsigned plane)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned i;
+
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
+ for (i = 0; i < 16384; i++) {
+ if (save) {
+ dev_priv->saved_vga_font[plane][i] =
+ ioread32_native(iovram + i * 4);
+ } else {
+ iowrite32_native(dev_priv->saved_vga_font[plane][i],
+ iovram + i * 4);
+ }
+ }
+}
+
+void
+nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
+{
+ uint8_t misc, gr4, gr5, gr6, seq2, seq4;
+ bool graphicsmode;
+ unsigned plane;
+ void __iomem *iovram;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, 0);
+
+ NVSetEnablePalette(dev, 0, true);
+ graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
+ NVSetEnablePalette(dev, 0, false);
+
+ if (graphicsmode) /* graphics mode => framebuffer => no need to save */
+ return;
+
+ NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
+
+ /* map first 64KiB of VRAM, holds VGA fonts etc */
+ iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+ if (!iovram) {
+ NV_ERROR(dev, "Failed to map VRAM, "
+ "cannot save/restore VGA fonts.\n");
+ return;
+ }
+
+ if (nv_two_heads(dev))
+ NVBlankScreen(dev, 1, true);
+ NVBlankScreen(dev, 0, true);
+
+ /* save control regs */
+ misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
+ seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
+ seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
+ gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
+ gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
+ gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
+
+ NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
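+	/* i.e. select colour mode (misc 0x67), disable odd/even addressing
+	 * for planar access (seq4 0x6), read/write mode 0 (gr5 0x0), and
+	 * graphics mode with the 64KiB a0000 mapping (gr6 0x5); the standard
+	 * setup for copying font planes */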
+
+ /* store font in planes 0..3 */
+ for (plane = 0; plane < 4; plane++)
+ nouveau_vga_font_io(dev, iovram, save, plane);
+
+ /* restore control regs */
+ NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
+
+ if (nv_two_heads(dev))
+ NVBlankScreen(dev, 1, false);
+ NVBlankScreen(dev, 0, false);
+
+ iounmap(iovram);
+}
+
+/*
+ * mode state save/load
+ */
+
+static void
+rd_cio_state(struct drm_device *dev, int head,
+ struct nv04_crtc_reg *crtcstate, int index)
+{
+ crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
+}
+
+static void
+wr_cio_state(struct drm_device *dev, int head,
+ struct nv04_crtc_reg *crtcstate, int index)
+{
+ NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
+}
+
+static void
+nv_save_state_ramdac(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ if (dev_priv->card_type >= NV_10)
+ regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
+
+ nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+ state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
+ if (nv_two_heads(dev))
+ state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+ if (dev_priv->chipset == 0x11)
+ regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
+
+ regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
+
+ if (nv_gf4_disp_arch(dev))
+ regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
+ if (dev_priv->chipset >= 0x30)
+ regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
+
+ regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
+ regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
+ regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
+ regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
+ regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
+ regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
+ regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
+ regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
+
+ for (i = 0; i < 7; i++) {
+ uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+ regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
+ regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
+ }
+
+ if (nv_gf4_disp_arch(dev)) {
+ regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
+ for (i = 0; i < 3; i++) {
+ regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
+ regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
+ }
+ }
+
+ regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+ regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
+ if (!nv_gf4_disp_arch(dev) && head == 0) {
+ /* early chips don't allow access to PRAMDAC_TMDS_* without
+ * the head A FPCLK on (nv11 even locks up) */
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
+ ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
+ }
+ regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
+ regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
+
+ regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
+
+ if (nv_gf4_disp_arch(dev))
+ regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
+
+ if (dev_priv->card_type == NV_40) {
+ regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
+ regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
+ regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
+
+ for (i = 0; i < 38; i++)
+ regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
+ NV_PRAMDAC_CTV + 4*i);
+ }
+}
+
+static void
+nv_load_state_ramdac(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+ int i;
+
+ if (dev_priv->card_type >= NV_10)
+ NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
+
+ nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+ if (nv_two_heads(dev))
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
+ if (dev_priv->chipset == 0x11)
+ NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
+
+ if (nv_gf4_disp_arch(dev))
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
+ if (dev_priv->chipset >= 0x30)
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
+
+ for (i = 0; i < 7; i++) {
+ uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+
+ NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
+ NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
+ }
+
+ if (nv_gf4_disp_arch(dev)) {
+ NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
+ for (i = 0; i < 3; i++) {
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
+ }
+ }
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
+
+ if (nv_gf4_disp_arch(dev))
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
+
+ if (dev_priv->card_type == NV_40) {
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
+
+ for (i = 0; i < 38; i++)
+ NVWriteRAMDAC(dev, head,
+ NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
+ }
+}
+
+static void
+nv_save_state_vga(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
+
+ for (i = 0; i < 25; i++)
+ rd_cio_state(dev, head, regp, i);
+
+ NVSetEnablePalette(dev, head, true);
+ for (i = 0; i < 21; i++)
+ regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
+ NVSetEnablePalette(dev, head, false);
+
+ for (i = 0; i < 9; i++)
+ regp->Graphics[i] = NVReadVgaGr(dev, head, i);
+
+ for (i = 0; i < 5; i++)
+ regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
+}
+
+static void
+nv_load_state_vga(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
+
+ for (i = 0; i < 5; i++)
+ NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
+
+ nv_lock_vga_crtc_base(dev, head, false);
+ for (i = 0; i < 25; i++)
+ wr_cio_state(dev, head, regp, i);
+ nv_lock_vga_crtc_base(dev, head, true);
+
+ for (i = 0; i < 9; i++)
+ NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
+
+ NVSetEnablePalette(dev, head, true);
+ for (i = 0; i < 21; i++)
+ NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
+ NVSetEnablePalette(dev, head, false);
+}
+
+static void
+nv_save_state_ext(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
+ if (dev_priv->card_type >= NV_30)
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+ if (dev_priv->card_type >= NV_10) {
+ regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
+ regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
+
+ if (dev_priv->card_type >= NV_30)
+ regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
+
+ if (dev_priv->card_type == NV_40)
+ regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
+
+ if (nv_two_heads(dev))
+ regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
+ regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
+ }
+
+ regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+ if (dev_priv->card_type >= NV_10) {
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+ }
+	/* NV11 and NV20 don't have these; they stop at 0x52. */
+ if (nv_gf4_disp_arch(dev)) {
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+ for (i = 0; i < 0x10; i++)
+ regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
+ }
+
+ regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
+}
+
+static void
+nv_load_state_ext(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ uint32_t reg900;
+ int i;
+
+ if (dev_priv->card_type >= NV_10) {
+ if (nv_two_heads(dev))
+ /* setting ENGINE_CTRL (EC) *must* come before
+ * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
+ * EC that should not be overwritten by writing stale EC
+ */
+ NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
+
+ nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
+ nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
+ nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
+ nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+
+ NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
+ NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
+ NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
+
+ if (dev_priv->card_type >= NV_30)
+ NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
+
+ if (dev_priv->card_type == NV_40) {
+ NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
+
+ reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
+ if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
+ else
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
+ }
+ }
+
+ NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+ if (dev_priv->card_type >= NV_30)
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+ if (dev_priv->card_type == NV_40)
+ nv_fix_nv40_hw_cursor(dev, head);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+ if (dev_priv->card_type >= NV_10) {
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+ }
+ /* NV11 and NV20 stop at 0x52. */
+ if (nv_gf4_disp_arch(dev)) {
+ if (dev_priv->card_type == NV_10) {
+ /* Not waiting for vertical retrace before modifying
+ CRE_53/CRE_54 causes lockups. */
+ nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+ nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ }
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+ for (i = 0; i < 0x10; i++)
+ NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
+ }
+
+ NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
+
+	/* Writing 1 to INTR_EN_0 would raise an interrupt every vblank
+	 * period; keep them disabled here and just ack any pending vblank. */
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
+}
+
+static void
+nv_save_state_palette(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ int head_offset = head * NV_PRMDIO_SIZE, i;
+
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+ NV_PRMDIO_PIXEL_MASK_MASK);
+ nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+
+ for (i = 0; i < 768; i++) {
+ state->crtc_reg[head].DAC[i] = nv_rd08(dev,
+ NV_PRMDIO_PALETTE_DATA + head_offset);
+ }
+
+ NVSetEnablePalette(dev, head, false);
+}
+
+void
+nouveau_hw_load_state_palette(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ int head_offset = head * NV_PRMDIO_SIZE, i;
+
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+ NV_PRMDIO_PIXEL_MASK_MASK);
+ nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+
+ for (i = 0; i < 768; i++) {
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
+ state->crtc_reg[head].DAC[i]);
+ }
+
+ NVSetEnablePalette(dev, head, false);
+}
+
+void nouveau_hw_save_state(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0x11)
+ /* NB: no attempt is made to restore the bad pll later on */
+ nouveau_hw_fix_bad_vpll(dev, head);
+ nv_save_state_ramdac(dev, head, state);
+ nv_save_state_vga(dev, head, state);
+ nv_save_state_palette(dev, head, state);
+ nv_save_state_ext(dev, head, state);
+}
+
+void nouveau_hw_load_state(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ NVVgaProtect(dev, head, true);
+ nv_load_state_ramdac(dev, head, state);
+ nv_load_state_ext(dev, head, state);
+ nouveau_hw_load_state_palette(dev, head, state);
+ nv_load_state_vga(dev, head, state);
+ NVVgaProtect(dev, head, false);
+}
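+/* note the bracketing: NVVgaProtect() holds the sequencer in reset and
+ * blanks the display while the registers are reloaded, so the monitor
+ * never sees a half-programmed mode
+ */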
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
new file mode 100644
index 00000000000..869130f8360
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_HW_H__
+#define __NOUVEAU_HW_H__
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MASK(field) ( \
+ (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
+
+#define XLATE(src, srclowbit, outfield) ( \
+ (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
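+
+/* These rely on register fields being defined as "high:low" bit pairs,
+ * e.g. a hypothetical "#define EXAMPLE_FIELD 7:4" (illustration only).
+ * (1 ? 7:4) parses as a ternary yielding the high bit and (0 ? 7:4) the
+ * low bit, so:
+ *
+ *	MASK(EXAMPLE_FIELD)          = (0xffffffff >> (31 - 3)) << 4 = 0xf0
+ *	XLATE(0x5, 0, EXAMPLE_FIELD) = (0x5 << 4) & 0xf0             = 0x50
+ */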
+
+void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
+void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
+void NVSetOwner(struct drm_device *, int owner);
+void NVBlankScreen(struct drm_device *, int head, bool blank);
+void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
+ struct nouveau_pll_vals *pv);
+int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
+ struct nouveau_pll_vals *pllvals);
+int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
+int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
+void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
+void nouveau_hw_save_state(struct drm_device *, int head,
+ struct nv04_mode_state *state);
+void nouveau_hw_load_state(struct drm_device *, int head,
+ struct nv04_mode_state *state);
+void nouveau_hw_load_state_palette(struct drm_device *, int head,
+ struct nv04_mode_state *state);
+
+/* nouveau_calc.c */
+extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
+ int *burst, int *lwm);
+extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
+ int clk, struct nouveau_pll_vals *pv);
+
+static inline uint32_t
+nvReadMC(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadVIDEO(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadFB(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t NVReadCRTC(struct drm_device *dev,
+ int head, uint32_t reg)
+{
+ uint32_t val;
+ if (head)
+ reg += NV_PCRTC0_SIZE;
+ val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+ return val;
+}
+
+static inline void NVWriteCRTC(struct drm_device *dev,
+ int head, uint32_t reg, uint32_t val)
+{
+ if (head)
+ reg += NV_PCRTC0_SIZE;
+ NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
+ int head, uint32_t reg)
+{
+ uint32_t val;
+ if (head)
+ reg += NV_PRAMDAC0_SIZE;
+ val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
+ head, reg, val);
+ return val;
+}
+
+static inline void NVWriteRAMDAC(struct drm_device *dev,
+ int head, uint32_t reg, uint32_t val)
+{
+ if (head)
+ reg += NV_PRAMDAC0_SIZE;
+ NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
+ head, reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint8_t nv_read_tmds(struct drm_device *dev,
+ int or, int dl, uint8_t address)
+{
+ int ramdac = (or & OUTPUT_C) >> 2;
+
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
+ NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
+ return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
+}
+
+static inline void nv_write_tmds(struct drm_device *dev,
+ int or, int dl, uint8_t address,
+ uint8_t data)
+{
+ int ramdac = (or & OUTPUT_C) >> 2;
+
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
+}
+
+static inline void NVWriteVgaCrtc(struct drm_device *dev,
+ int head, uint8_t index, uint8_t value)
+{
+ NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, value);
+ nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
+ int head, uint8_t index)
+{
+ uint8_t val;
+ nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
+ NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, val);
+ return val;
+}
+
+/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for
+ * CR58. I suspect they in fact do nothing, but are merely a way to carry
+ * useful per-head variables around.
+ *
+ * Known uses:
+ * CR57 CR58
+ * 0x00 index to the appropriate dcb entry (or 7f for inactive)
+ * 0x02 dcb entry's "or" value (or 00 for inactive)
+ * 0x03 bit0 set for dual link (LVDS, possibly elsewhere too)
+ * 0x08 or 0x09 pxclk in MHz
+ * 0x0f laptop panel info - low nibble for PEXTDEV_BOOT_0 strap
+ * high nibble for xlat strap value
+ */
+
+static inline void
+NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
+}
+
+static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
+{
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+ return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
+}
+
+static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
+ int head, uint32_t reg)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint8_t val;
+
+ /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+ * NVSetOwner for the relevant head to be programmed */
+ if (head && dev_priv->card_type == NV_40)
+ reg += NV_PRMVIO_SIZE;
+
+ val = nv_rd08(dev, reg);
+ NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
+ return val;
+}
+
+static inline void NVWritePRMVIO(struct drm_device *dev,
+ int head, uint32_t reg, uint8_t value)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+ * NVSetOwner for the relevant head to be programmed */
+ if (head && dev_priv->card_type == NV_40)
+ reg += NV_PRMVIO_SIZE;
+
+ NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
+ head, reg, value);
+ nv_wr08(dev, reg, value);
+}
+
+static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
+{
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+}
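+/* the INP0 read above resets the attribute controller's index/data
+ * flip-flop, guaranteeing the following ARX write is taken as an index;
+ * with bit 5 clear the palette becomes host-accessible and the screen is
+ * blanked, with it set normal display resumes
+ */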
+
+static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
+{
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+}
+
+static inline void NVWriteVgaAttr(struct drm_device *dev,
+ int head, uint8_t index, uint8_t value)
+{
+ if (NVGetEnablePalette(dev, head))
+ index &= ~0x20;
+ else
+ index |= 0x20;
+
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, value);
+ nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
+ int head, uint8_t index)
+{
+ uint8_t val;
+ if (NVGetEnablePalette(dev, head))
+ index &= ~0x20;
+ else
+ index |= 0x20;
+
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
+ NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, val);
+ return val;
+}
+
+static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
+{
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
+}
+
+static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
+{
+ uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+ if (protect) {
+ NVVgaSeqReset(dev, head, true);
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+ } else {
+ /* Reenable sequencer, then turn on screen */
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
+ NVVgaSeqReset(dev, head, false);
+ }
+ NVSetEnablePalette(dev, head, protect);
+}
+
+static inline bool
+nv_heads_tied(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0x11)
+ return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
+
+ return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
+}
+
+/* makes cr0-7 on the specified head read-only */
+static inline bool
+nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
+{
+ uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
+ bool waslocked = cr11 & 0x80;
+
+ if (lock)
+ cr11 |= 0x80;
+ else
+ cr11 &= ~0x80;
+ NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
+
+ return waslocked;
+}
+
+static inline void
+nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
+{
+ /* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
+ * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
+ * bit6: seems to have some effect on CR09 (double scan, VBS_9)
+ * bit5: unlocks HDE
+ * bit4: unlocks VDE
+ * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
+ * bit2: same as bit 1 of 0x60?804
+ * bit0: same as bit 0 of 0x60?804
+ */
+
+ uint8_t cr21 = lock;
+
+ if (lock < 0)
+ /* 0xfa is generic "unlock all" mask */
+ cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
+}
+
+/* renders the extended crtc regs (cr19+) on all crtcs impervious:
+ * immutable and unreadable
+ */
+static inline bool
+NVLockVgaCrtcs(struct drm_device *dev, bool lock)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+
+ NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
+ lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
+ /* NV11 has independently lockable extended crtcs, except when tied */
+ if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
+ NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
+ lock ? NV_CIO_SR_LOCK_VALUE :
+ NV_CIO_SR_UNLOCK_RW_VALUE);
+
+ return waslocked;
+}
+
+/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
+#define NV04_CURSOR_SIZE 32
+/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
+#define NV10_CURSOR_SIZE 64
+
+static inline int nv_cursor_width(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+}
+
+static inline void
+nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
+{
+	/* on some nv40s (e.g. "true" nv40s in the NV_PFB_BOOT_0 sense, such
+	 * as the gf6800gt) a hardware bug requires a write to
+	 * PRAMDAC_CURSOR_POS for changes to the CRTC CURCTL regs to take
+	 * effect, whether changing the pixmap location or just
+	 * showing/hiding the cursor
+	 */
+ uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
+}
+
+static inline void
+nv_show_cursor(struct drm_device *dev, int head, bool show)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint8_t *curctl1 =
+ &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
+
+ if (show)
+ *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+ else
+ *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
+
+ if (dev_priv->card_type == NV_40)
+ nv_fix_nv40_hw_cursor(dev, head);
+}
+
+static inline uint32_t
+nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int mask;
+
+ if (bpp == 15)
+ bpp = 16;
+ if (bpp == 24)
+ bpp = 8;
+
+ /* Alignment requirements taken from the Haiku driver */
+ if (dev_priv->card_type == NV_04)
+ mask = 128 / bpp - 1;
+ else
+ mask = 512 / bpp - 1;
+
+ return (width + mask) & ~mask;
+}
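+/* worked example: a 1025 pixel wide 32bpp surface on NV10+ uses
+ * mask = 512/32 - 1 = 15, so the pitch is padded to 1040 pixels; 15bpp
+ * shares the 16bpp rule as both occupy two bytes per pixel
+ */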
+
+#endif /* __NOUVEAU_HW_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
new file mode 100644
index 00000000000..70e994d2812
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_hw.h"
+
+static void
+nv04_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+ NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+}
+
+static void
+nv04_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+ NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+}
+
+static int
+nv04_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+}
+
+static int
+nv04_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+}
+
+static void
+nv4e_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+ nv_wr32(dev, i2c->wr, val | 0x01);
+}
+
+static void
+nv4e_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+ nv_wr32(dev, i2c->wr, val | 0x01);
+}
+
+static int
+nv4e_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+}
+
+static int
+nv4e_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+}
+
+static int
+nv50_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(nv_rd32(dev, i2c->rd) & 1);
+}
+
+static int
+nv50_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(nv_rd32(dev, i2c->rd) & 2);
+}
+
+static void
+nv50_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+}
+
+static void
+nv50_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ nv_wr32(dev, i2c->wr,
+ (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
+ i2c->data = state;
+}
+
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
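+/* (the table above holds the MMIO offsets of the nv50 i2c ports; a DCB
+ * entry's "read" byte indexes it, and read/write share one register) */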
+
+int
+nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c;
+ int ret;
+
+ if (entry->chan)
+ return -EEXIST;
+
+ if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
+ NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
+ return -EINVAL;
+ }
+
+ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ if (i2c == NULL)
+ return -ENOMEM;
+
+ switch (entry->port_type) {
+ case 0:
+ i2c->algo.bit.setsda = nv04_i2c_setsda;
+ i2c->algo.bit.setscl = nv04_i2c_setscl;
+ i2c->algo.bit.getsda = nv04_i2c_getsda;
+ i2c->algo.bit.getscl = nv04_i2c_getscl;
+ i2c->rd = entry->read;
+ i2c->wr = entry->write;
+ break;
+ case 4:
+ i2c->algo.bit.setsda = nv4e_i2c_setsda;
+ i2c->algo.bit.setscl = nv4e_i2c_setscl;
+ i2c->algo.bit.getsda = nv4e_i2c_getsda;
+ i2c->algo.bit.getscl = nv4e_i2c_getscl;
+ i2c->rd = 0x600800 + entry->read;
+ i2c->wr = 0x600800 + entry->write;
+ break;
+ case 5:
+ i2c->algo.bit.setsda = nv50_i2c_setsda;
+ i2c->algo.bit.setscl = nv50_i2c_setscl;
+ i2c->algo.bit.getsda = nv50_i2c_getsda;
+ i2c->algo.bit.getscl = nv50_i2c_getscl;
+ i2c->rd = nv50_i2c_port[entry->read];
+ i2c->wr = i2c->rd;
+ break;
+ case 6:
+ i2c->rd = entry->read;
+ i2c->wr = entry->write;
+ break;
+ default:
+ NV_ERROR(dev, "DCB I2C port type %d unknown\n",
+ entry->port_type);
+ kfree(i2c);
+ return -EINVAL;
+ }
+
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), index);
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+
+ if (entry->port_type < 6) {
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.udelay = 40;
+ i2c->algo.bit.timeout = usecs_to_jiffies(5000);
+ i2c->algo.bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ } else {
+ i2c->adapter.algo_data = &i2c->algo.dp;
+ i2c->algo.dp.running = false;
+ i2c->algo.dp.address = 0;
+ i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
+ ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "Failed to register i2c %d\n", index);
+ kfree(i2c);
+ return ret;
+ }
+
+ entry->chan = i2c;
+ return 0;
+}
+
+void
+nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+{
+ if (!entry->chan)
+ return;
+
+ i2c_del_adapter(&entry->chan->adapter);
+ kfree(entry->chan);
+ entry->chan = NULL;
+}
+
+struct nouveau_i2c_chan *
+nouveau_i2c_find(struct drm_device *dev, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
+ return NULL;
+
+ if (!bios->bdcb.dcb.i2c[index].chan) {
+ if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+ return NULL;
+ }
+
+ return bios->bdcb.dcb.i2c[index].chan;
+}
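+
+/* Usage sketch (hypothetical caller, for illustration only): an output
+ * looks its bus up by DCB i2c index, then uses the regular Linux i2c API:
+ *
+ *	struct nouveau_i2c_chan *chan = nouveau_i2c_find(dev, i2c_index);
+ *	if (chan)
+ *		ret = i2c_transfer(&chan->adapter, msgs, num);
+ */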
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
new file mode 100644
index 00000000000..c8eaf7a9fcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_I2C_H__
+#define __NOUVEAU_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_dp_helper.h"
+
+struct dcb_i2c_entry;
+
+struct nouveau_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ union {
+ struct i2c_algo_bit_data bit;
+ struct i2c_algo_dp_aux_data dp;
+ } algo;
+ unsigned rd;
+ unsigned wr;
+ unsigned data;
+};
+
+int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
+void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+
+int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+
+#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 00000000000..a2c30f4611b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,72 @@
+/**
+ * \file nouveau_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the nouveau DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+#if 0
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
+ fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
+#endif
+ lock_kernel(); /* XXX for now */
+ if (fn != NULL)
+ ret = (*fn)(filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 00000000000..370c72c968d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_reg.h"
+#include <linux/ratelimit.h>
+
+/* needed for hotplug irq */
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+void
+nouveau_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Master disable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+
+ if (dev_priv->card_type == NV_50) {
+ INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ INIT_LIST_HEAD(&dev_priv->vbl_waiting);
+ }
+}
+
+int
+nouveau_irq_postinstall(struct drm_device *dev)
+{
+ /* Master enable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+ return 0;
+}
+
+void
+nouveau_irq_uninstall(struct drm_device *dev)
+{
+ /* Master disable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_pgraph_object_method *grm;
+ struct nouveau_pgraph_object_class *grc;
+
+ grc = dev_priv->engine.graph.grclass;
+ while (grc->id) {
+ if (grc->id == class)
+ break;
+ grc++;
+ }
+
+ if (grc->id != class || !grc->methods)
+ return -ENOENT;
+
+ grm = grc->methods;
+ while (grm->id) {
+ if (grm->id == mthd)
+ return grm->exec(chan, class, mthd, data);
+ grm++;
+ }
+
+ return -ENOENT;
+}
+
+static bool
+nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+
+ if (mthd == 0x0000) {
+ struct nouveau_gpuobj_ref *ref = NULL;
+
+ if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ return false;
+
+ if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+ return false;
+
+ chan->sw_subchannel[subc] = ref->gpuobj->class;
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
+ NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
+ return true;
+ }
+
+ /* hw object */
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
+ return false;
+
+ if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
+ return false;
+
+ return true;
+}
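+/* a CACHE1 address packs the subchannel into bits 15:13 and the method
+ * into bits 12:2; method 0x0000 binds an object to a subchannel, which is
+ * how software objects end up tracked in sw_subchannel[] above
+ */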
+
+static void
+nouveau_fifo_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ struct nouveau_channel *chan = NULL;
+ uint32_t chid, get;
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ chid = engine->fifo.channel_id(dev);
+ if (chid >= 0 && chid < engine->fifo.channels)
+ chan = dev_priv->fifos[chid];
+ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.  Tests show that it
+			 * wraps around to the start at GET=0x800.  No clue
+			 * as to why.
+			 */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (dev_priv->card_type < NV_40) {
+ mthd = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
+ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
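+			/* recovery: freeze the pusher, ack the interrupt,
+			 * step GET past the offending entry while the cache
+			 * is stopped, then re-enable everything */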
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_DMA_PUSHER);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
+ get + 4);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
+ nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+ nv_wr32(dev, 0x2140, 0);
+ nv_wr32(dev, 0x140, 0);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
+
+struct nouveau_bitfield_names {
+ uint32_t mask;
+ const char *name;
+};
+
+static struct nouveau_bitfield_names nstatus_names[] =
+{
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nstatus_names_nv10[] =
+{
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nsource_names[] =
+{
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
+};
+
+static void
+nouveau_print_bitfield_names_(uint32_t value,
+ const struct nouveau_bitfield_names *namelist,
+ const int namelist_len)
+{
+ /*
+ * Caller must have already printed the KERN_* log level for us.
+ * Also the caller is responsible for adding the newline.
+ */
+ int i;
+ for (i = 0; i < namelist_len; ++i) {
+ uint32_t mask = namelist[i].mask;
+ if (value & mask) {
+ printk(" %s", namelist[i].name);
+ value &= ~mask;
+ }
+ }
+ if (value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+#define nouveau_print_bitfield_names(val, namelist) \
+ nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
+
+static int
+nouveau_graph_chid_from_grctx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ if (dev_priv->card_type < NV_40)
+ return dev_priv->engine.fifo.channels;
+ else
+ if (dev_priv->card_type < NV_50) {
+ inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (!chan || !chan->ramin_grctx)
+ continue;
+
+ if (inst == chan->ramin_grctx->instance)
+ break;
+ }
+ } else {
+ inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->instance)
+ break;
+ }
+ }
+
+ return i;
+}
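+/* on NV40/NV50 the trapped channel isn't encoded in TRAPPED_ADDR, so the
+ * function above recovers it by matching the active context instance
+ * (0x40032c) against each channel; it returns fifo.channels, an invalid
+ * id, when nothing matches
+ */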
+
+static int
+nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ int channel;
+
+ if (dev_priv->card_type < NV_10)
+ channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
+ else
+ if (dev_priv->card_type < NV_40)
+ channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ else
+ channel = nouveau_graph_chid_from_grctx(dev);
+
+ if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
+ NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
+ return -EINVAL;
+ }
+
+ *channel_ret = channel;
+ return 0;
+}
+
+struct nouveau_pgraph_trap {
+ int channel;
+ int class;
+ int subc, mthd, size;
+ uint32_t data, data2;
+ uint32_t nsource, nstatus;
+};
+
+static void
+nouveau_graph_trap_info(struct drm_device *dev,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t address;
+
+ trap->nsource = trap->nstatus = 0;
+ if (dev_priv->card_type < NV_50) {
+ trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ }
+
+ if (nouveau_graph_trapped_channel(dev, &trap->channel))
+ trap->channel = -1;
+ address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+
+ trap->mthd = address & 0x1FFC;
+ trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ if (dev_priv->card_type < NV_10) {
+ trap->subc = (address >> 13) & 0x7;
+ } else {
+ trap->subc = (address >> 16) & 0x7;
+ trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
+ }
+
+ if (dev_priv->card_type < NV_10)
+ trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
+ else if (dev_priv->card_type < NV_40)
+ trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
+ else if (dev_priv->card_type < NV_50)
+ trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
+ else
+ trap->class = nv_rd32(dev, 0x400814);
+}
+
+static void
+nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
+
+ NV_INFO(dev, "%s - nSource:", id);
+ nouveau_print_bitfield_names(nsource, nsource_names);
+ printk(", nStatus:");
+ if (dev_priv->card_type < NV_10)
+ nouveau_print_bitfield_names(nstatus, nstatus_names);
+ else
+ nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+ printk("\n");
+
+ NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
+ "Data 0x%08x:0x%08x\n",
+ id, trap->channel, trap->subc,
+ trap->class, trap->mthd,
+ trap->data2, trap->data);
+}
+
+static int
+nouveau_pgraph_intr_swmthd(struct drm_device *dev,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (trap->channel < 0 ||
+ trap->channel >= dev_priv->engine.fifo.channels ||
+ !dev_priv->fifos[trap->channel])
+ return -ENODEV;
+
+ return nouveau_call_method(dev_priv->fifos[trap->channel],
+ trap->class, trap->mthd, trap->data);
+}
+
+static inline void
+nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
+{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
+
+ nouveau_graph_trap_info(dev, &trap);
+
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (nouveau_pgraph_intr_swmthd(dev, &trap))
+ unhandled = 1;
+ } else {
+ unhandled = 1;
+ }
+
+ if (unhandled)
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
+}
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
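+/* i.e. allow at most 20 PGRAPH error dumps per 3 seconds, so a wedged
+ * channel can't flood the log from interrupt context */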
+
+static inline void
+nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
+{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
+
+ nouveau_graph_trap_info(dev, &trap);
+ trap.nsource = nsource;
+
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (nouveau_pgraph_intr_swmthd(dev, &trap))
+ unhandled = 1;
+ } else {
+ unhandled = 1;
+ }
+
+ if (unhandled && nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+}
+
+static inline void
+nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t chid;
+
+ chid = engine->fifo.channel_id(dev);
+ NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ nv04_graph_context_switch(dev);
+ break;
+ case NV_10:
+ nv10_graph_context_switch(dev);
+ break;
+ default:
+ NV_ERROR(dev, "Context switch not implemented\n");
+ break;
+ }
+}
+
+static void
+nouveau_pgraph_irq_handler(struct drm_device *dev)
+{
+ uint32_t status;
+
+ while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+ if (status & NV_PGRAPH_INTR_NOTIFY) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+
+ status &= ~NV_PGRAPH_INTR_NOTIFY;
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
+ }
+
+ if (status & NV_PGRAPH_INTR_ERROR) {
+ nouveau_pgraph_intr_error(dev, nsource);
+
+ status &= ~NV_PGRAPH_INTR_ERROR;
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+ }
+
+ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nouveau_pgraph_intr_context_switch(dev);
+
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv_wr32(dev, NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ }
+
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
+
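+ /* If servicing the trap left PGRAPH's FIFO access disabled (bit 0
+ * of NV04_PGRAPH_FIFO clear), turn it back on so method processing
+ * can resume.
+ */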
+ if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nv50_pgraph_irq_handler(struct drm_device *dev)
+{
+ uint32_t status, nsource;
+
+ status = nv_rd32(dev, NV03_PGRAPH_INTR);
+ nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+ if (status & 0x00000001) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+ status &= ~0x00000001;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+ }
+
+ if (status & 0x00000010) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+
+ status &= ~0x00000010;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+ }
+
+ if (status & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ nv50_graph_context_switch(dev);
+
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ }
+
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
+
+ status &= ~0x00100000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+ }
+
+ if (status & 0x00200000) {
+ int r;
+
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ NV_ERROR(dev, "magic set 1:\n");
+ for (r = 0x408900; r <= 0x408910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
+ for (r = 0x408e08; r <= 0x408e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+ NV_ERROR(dev, "magic set 2:\n");
+ for (r = 0x409900; r <= 0x409910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
+ for (r = 0x409e08; r <= 0x409e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+ status &= ~0x00200000;
+ nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+ }
+
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
+
+ {
+ const int isb = (1 << 16) | (1 << 0);
+
+ if ((nv_rd32(dev, 0x400500) & isb) != isb)
+ nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+{
+ if (crtc & 1)
+ nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+
+ if (crtc & 2)
+ nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+}
+
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status, fbdev_flags = 0;
+
+ status = nv_rd32(dev, NV03_PMC_INTR_0);
+ if (!status)
+ return IRQ_NONE;
+
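+ /* Disable accelerated fbcon while the interrupt is serviced; the
+ * NV_ERROR()/NV_INFO() output below could otherwise re-enter the
+ * engine being prodded. The flags are restored before returning.
+ */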
+ if (dev_priv->fbdev_info) {
+ fbdev_flags = dev_priv->fbdev_info->flags;
+ dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
+ nouveau_fifo_irq_handler(dev);
+ status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
+ if (dev_priv->card_type >= NV_50)
+ nv50_pgraph_irq_handler(dev);
+ else
+ nouveau_pgraph_irq_handler(dev);
+
+ status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
+ nouveau_crtc_irq_handler(dev, (status>>24)&3);
+ status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
+ }
+
+ if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
+ NV_PMC_INTR_0_NV50_I2C_PENDING)) {
+ nv50_display_irq_handler(dev);
+ status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
+ NV_PMC_INTR_0_NV50_I2C_PENDING);
+ }
+
+ if (status)
+ NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
+
+ if (dev_priv->fbdev_info)
+ dev_priv->fbdev_info->flags = fbdev_flags;
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 00000000000..02755712ed3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ * Copyright 2005 Stephane Marchesin
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+
+static struct mem_block *
+split_block(struct mem_block *p, uint64_t start, uint64_t size,
+ struct drm_file *file_priv)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock =
+ kmalloc(sizeof(*newblock), GFP_KERNEL);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock =
+ kmalloc(sizeof(*newblock), GFP_KERNEL);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+out:
+ /* Our block is in the middle */
+ p->file_priv = file_priv;
+ return p;
+}
+
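+/* First-fit allocation from the block list. 'align2' is log2 of the
+ * required alignment; 'tail' walks the list backwards and carves the
+ * allocation from the end of a free region instead of its start.
+ */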
+struct mem_block *
+nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
+ int align2, struct drm_file *file_priv, int tail)
+{
+ struct mem_block *p;
+ uint64_t mask = (1ULL << align2) - 1;
+
+ if (!heap)
+ return NULL;
+
+ if (tail) {
+ list_for_each_prev(p, heap) {
+ uint64_t start = ((p->start + p->size) - size) & ~mask;
+
+ if (p->file_priv == NULL && start >= p->start &&
+ start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+ } else {
+ list_for_each(p, heap) {
+ uint64_t start = (p->start + mask) & ~mask;
+
+ if (p->file_priv == NULL &&
+ start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+ }
+
+ return NULL;
+}
+
+void nouveau_mem_free_block(struct mem_block *p)
+{
+ p->file_priv = NULL;
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->file_priv == NULL) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ kfree(q);
+ }
+
+ if (p->prev->file_priv == NULL) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ kfree(p);
+ }
+}
+
+/* Initialize a heap: a circular block list around a sentinel element
+ * whose file_priv is set to -1 so it is never merged or handed out.
+ * (XXX: how to detect an uninitialized heap?)
+ */
+int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
+ uint64_t size)
+{
+ struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
+
+ if (!blocks)
+ return -ENOMEM;
+
+ *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
+ if (!*heap) {
+ kfree(blocks);
+ return -ENOMEM;
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->file_priv = NULL;
+ blocks->next = blocks->prev = *heap;
+
+ memset(*heap, 0, sizeof(**heap));
+ (*heap)->file_priv = (struct drm_file *) -1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return 0;
+}
+
+/*
+ * Free all blocks associated with the releasing file_priv
+ */
+void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ list_for_each(p, heap) {
+ if (p->file_priv == file_priv)
+ p->file_priv = NULL;
+ }
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ list_for_each(p, heap) {
+ while ((p->file_priv == NULL) &&
+ (p->next->file_priv == NULL) &&
+ (p->next != heap)) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ kfree(q);
+ }
+ }
+}
+
+/*
+ * NV50 VM helpers
+ */
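+/* Map (or, with bit 31 of 'flags' set, unmap) a linear range of
+ * physical addresses into the NV50 virtual address space. GART
+ * mappings use 4KiB pages (psz = 12), VRAM mappings 64KiB pages
+ * (psz = 16); each page table entry is a pair of 32-bit words.
+ */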
+int
+nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
+ uint32_t flags, uint64_t phys)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj **pgt;
+ unsigned psz, pfl, pages;
+
+ if (virt >= dev_priv->vm_gart_base &&
+ (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
+ psz = 12;
+ pgt = &dev_priv->gart_info.sg_ctxdma;
+ pfl = 0x21;
+ virt -= dev_priv->vm_gart_base;
+ } else
+ if (virt >= dev_priv->vm_vram_base &&
+ (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
+ psz = 16;
+ pgt = dev_priv->vm_vram_pt;
+ pfl = 0x01;
+ virt -= dev_priv->vm_vram_base;
+ } else {
+ NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
+ virt, virt + size - 1);
+ return -EINVAL;
+ }
+
+ pages = size >> psz;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (flags & 0x80000000) {
+ while (pages--) {
+ struct nouveau_gpuobj *pt = pgt[virt >> 29];
+ unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+
+ nv_wo32(dev, pt, pte++, 0x00000000);
+ nv_wo32(dev, pt, pte++, 0x00000000);
+
+ virt += (1 << psz);
+ }
+ } else {
+ while (pages--) {
+ struct nouveau_gpuobj *pt = pgt[virt >> 29];
+ unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+ unsigned offset_h = upper_32_bits(phys) & 0xff;
+ unsigned offset_l = lower_32_bits(phys);
+
+ nv_wo32(dev, pt, pte++, offset_l | pfl);
+ nv_wo32(dev, pt, pte++, offset_h | flags);
+
+ phys += (1 << psz);
+ virt += (1 << psz);
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void
+nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+{
+ nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+}
+
+/*
+ * Cleanup everything
+ */
+void nouveau_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap;) {
+ struct mem_block *q = p;
+ p = p->next;
+ kfree(q);
+ }
+
+ kfree(*heap);
+ *heap = NULL;
+}
+
+void nouveau_mem_close(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+
+ ttm_bo_device_release(&dev_priv->ttm.bdev);
+
+ nouveau_ttm_global_release(dev_priv);
+
+ if (drm_core_has_AGP(dev) && dev->agp &&
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_agp_mem *entry, *tempe;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until drv_cleanup is called. */
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+ if (entry->bound)
+ drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&dev->agp->memory);
+
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+
+ if (dev_priv->fb_mtrr) {
+ drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
+ drm_get_resource_len(dev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = 0;
+ }
+}
+
+/*XXX won't work on BSD because of pci_read_config_dword */
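+/* nForce/nForce2 IGPs carve their framebuffer out of system RAM, so the
+ * size is read from the host bridge's PCI config space (register 0x7c
+ * on nForce, 0x84 on nForce2) rather than from the GPU.
+ */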
+static uint64_t
+nouveau_mem_fb_amount_igp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ NV_ERROR(dev, "no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->flags & NV_NFORCE) {
+ pci_read_config_dword(bridge, 0x7C, &mem);
+ return (uint64_t)(((mem >> 6) & 31) + 1) * 1024 * 1024;
+ } else
+ if (dev_priv->flags & NV_NFORCE2) {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ return (uint64_t)(((mem >> 4) & 127) + 1) * 1024 * 1024;
+ }
+
+ NV_ERROR(dev, "impossible!\n");
+ return 0;
+}
+
+/* returns the amount of FB ram in bytes */
+uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t boot0;
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ boot0 = nv_rd32(dev, NV03_BOOT_0);
+ if (boot0 & 0x00000100)
+ return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
+
+ switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
+ case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ return 32 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ return 16 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ return 8 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ return 4 * 1024 * 1024;
+ }
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_50:
+ default:
+ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+ return nouveau_mem_fb_amount_igp(dev);
+ } else {
+ uint64_t mem;
+ mem = (nv_rd32(dev, NV04_FIFO_DATA) &
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
+ return mem * 1024 * 1024;
+ }
+ break;
+ }
+
+ NV_ERROR(dev,
+ "Unable to detect video ram size. Please report your setup to "
+ DRIVER_EMAIL "\n");
+ return 0;
+}
+
+static void nouveau_mem_reset_agp(struct drm_device *dev)
+{
+ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+
+ saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
+ saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
+
+ /* clear busmaster bit */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
+ /* clear SBA and AGP bits */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+
+ /* power cycle pgraph, if enabled */
+ pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
+ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+ }
+
+ /* and restore (gives effect of resetting AGP) */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
+ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
+}
+
+int
+nouveau_mem_init_agp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+ int ret;
+
+ if (nouveau_noagp)
+ return 0;
+
+ nouveau_mem_reset_agp(dev);
+
+ if (!dev->agp->acquired) {
+ ret = drm_agp_acquire(dev);
+ if (ret) {
+ NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = drm_agp_info(dev, &info);
+ if (ret) {
+ NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
+ return ret;
+ }
+
+ /* see agp.h for the AGPSTAT_* modes available */
+ mode.mode = info.mode;
+ ret = drm_agp_enable(dev, mode);
+ if (ret) {
+ NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->gart_info.type = NOUVEAU_GART_AGP;
+ dev_priv->gart_info.aper_base = info.aperture_base;
+ dev_priv->gart_info.aper_size = info.aperture_size;
+ return 0;
+}
+
+int
+nouveau_mem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ int ret, dma_bits = 32;
+
+ dev_priv->fb_phys = drm_get_resource_start(dev, 1);
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
+ if (dev_priv->card_type >= NV_50 &&
+ pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+
+ ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ if (ret) {
+ NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_ttm_global_init(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
+ dev_priv->ttm.bo_global_ref.ref.object,
+ &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+ dma_bits <= 32 ? true : false);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
+ spin_lock_init(&dev_priv->ttm.bo_list_lock);
+
+ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
+
+ dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+ if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
+ dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
+ dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
+ NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
+
+ /* remove reserved space at end of vram from available amount */
+ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
+ dev_priv->fb_aper_free = dev_priv->fb_available_size;
+
+ /* mappable vram */
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ dev_priv->fb_available_size >> PAGE_SHIFT);
+ if (ret) {
+ NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
+ return ret;
+ }
+
+ /* GART */
+#if !defined(__powerpc__) && !defined(__ia64__)
+ if (drm_device_is_agp(dev) && dev->agp) {
+ ret = nouveau_mem_init_agp(dev);
+ if (ret)
+ NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
+ }
+#endif
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
+ ret = nouveau_sgdma_init(dev);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
+ return ret;
+ }
+ }
+
+ NV_INFO(dev, "%d MiB GART (aperture)\n",
+ (int)(dev_priv->gart_info.aper_size >> 20));
+ dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
+ dev_priv->gart_info.aper_size >> PAGE_SHIFT);
+ if (ret) {
+ NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
+ drm_get_resource_len(dev, 1),
+ DRM_MTRR_WC);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 00000000000..6c66a34b634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nouveau_notifier_init_channel(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_bo *ntfy = NULL;
+ int ret;
+
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
+ TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+ 0, 0x0000, false, true, &ntfy);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_bo_map(ntfy);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
+ if (ret)
+ goto out_err;
+
+ chan->notifier_bo = ntfy;
+out_err:
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(ntfy->gem);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ return ret;
+}
+
+void
+nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ if (!chan->notifier_bo)
+ return;
+
+ nouveau_bo_unmap(chan->notifier_bo);
+ mutex_lock(&dev->struct_mutex);
+ nouveau_bo_unpin(chan->notifier_bo);
+ drm_gem_object_unreference(chan->notifier_bo->gem);
+ mutex_unlock(&dev->struct_mutex);
+ nouveau_mem_takedown(&chan->notifier_heap);
+}
+
+static void
+nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
+ struct nouveau_gpuobj *gpuobj)
+{
+ NV_DEBUG(dev, "\n");
+
+ if (gpuobj->priv)
+ nouveau_mem_free_block(gpuobj->priv);
+}
+
+int
+nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
+ int size, uint32_t *b_offset)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *nobj = NULL;
+ struct mem_block *mem;
+ uint32_t offset;
+ int target, ret;
+
+ if (!chan->notifier_heap) {
+ NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
+ chan->id);
+ return -EINVAL;
+ }
+
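+ /* (struct drm_file *)-2 marks the block as kernel-owned: it never
+ * compares equal to a real client's file pointer, so the block
+ * survives nouveau_mem_release().
+ */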
+ mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
+ (struct drm_file *)-2, 0);
+ if (!mem) {
+ NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
+ return -ENOMEM;
+ }
+
+ offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
+ target = NV_DMA_TARGET_VIDMEM;
+ } else
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
+ dev_priv->card_type < NV_50) {
+ ret = nouveau_sgdma_get_page(dev, offset, &offset);
+ if (ret)
+ return ret;
+ target = NV_DMA_TARGET_PCI;
+ } else {
+ target = NV_DMA_TARGET_AGP;
+ }
+ } else {
+ NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
+ chan->notifier_bo->bo.mem.mem_type);
+ return -EINVAL;
+ }
+ offset += mem->start;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
+ mem->size, NV_DMA_ACCESS_RW, target,
+ &nobj);
+ if (ret) {
+ nouveau_mem_free_block(mem);
+ NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
+ return ret;
+ }
+ nobj->dtor = nouveau_notifier_gpuobj_dtor;
+ nobj->priv = mem;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &nobj);
+ nouveau_mem_free_block(mem);
+ NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ *b_offset = mem->start;
+ return 0;
+}
+
+int
+nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
+{
+ if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
+ return -EINVAL;
+
+ if (poffset) {
+ struct mem_block *mem = nobj->priv;
+
+ if (*poffset >= mem->size)
+ return -EINVAL;
+
+ *poffset += mem->start;
+ }
+
+ return 0;
+}
+
+int
+nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_notifierobj_alloc *na = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 00000000000..93379bb81be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/* NVidia uses context objects to drive drawing operations.
+
+ Context objects can be selected into 8 subchannels in the FIFO,
+ and then used via DMA command buffers.
+
+ A context object is referenced by a user defined handle (CARD32). The HW
+ looks up graphics objects in a hash table in the instance RAM.
+
+ An entry in the hash table consists of 2 CARD32. The first CARD32 contains
+ the handle, the second one a bitfield, that contains the address of the
+ object in instance RAM.
+
+ The format of the second CARD32 seems to be:
+
+ NV4 to NV30:
+
+ 15: 0 instance_addr >> 4
+ 17:16 engine (here uses 1 = graphics)
+ 28:24 channel id (here uses 0)
+ 31 valid (use 1)
+
+ NV40:
+
+ 15: 0 instance_addr >> 4 (maybe 19-0)
+ 21:20 engine (here uses 1 = graphics)
+ I'm unsure about the other bits, but using 0 seems to work.
+
+ The key into the hash table depends on the object handle and channel id and
+ is given as:
+*/
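+/* Fold the handle into ramht_bits-wide chunks with XOR, mix in the
+ * channel id on pre-NV50 chips, then shift left by 3: each hash table
+ * entry is two 32-bit words (8 bytes).
+ */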
+static uint32_t
+nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t hash = 0;
+ int i;
+
+ NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
+
+ for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
+ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
+ handle >>= dev_priv->ramht_bits;
+ }
+
+ if (dev_priv->card_type < NV_50)
+ hash ^= channel << (dev_priv->ramht_bits - 4);
+ hash <<= 3;
+
+ NV_DEBUG(dev, "hash=0x%08x\n", hash);
+ return hash;
+}
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ uint32_t offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
+
+ if (dev_priv->card_type < NV_40)
+ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+ return (ctx != 0);
+}
+
+static int
+nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_channel *chan = ref->channel;
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ uint32_t ctx, co, ho;
+
+ if (!ramht) {
+ NV_ERROR(dev, "No hash table!\n");
+ return -EINVAL;
+ }
+
+ if (dev_priv->card_type < NV_40) {
+ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
+ (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ ctx = (ref->instance >> 4) |
+ (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else {
+ if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+ ctx = (ref->instance << 10) | 2;
+ } else {
+ ctx = (ref->instance >> 4) |
+ ((ref->gpuobj->engine <<
+ NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+ }
+ }
+
+ instmem->prepare_access(dev, true);
+ co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
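+ /* Linear probing: scan forward from the hashed slot, wrapping at
+ * the end of the table; getting back to the start offset 'ho'
+ * means every entry is occupied.
+ */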
+ do {
+ if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+ NV_DEBUG(dev,
+ "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, ref->handle, ctx);
+ nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
+ nv_wo32(dev, ramht, (co + 4)/4, ctx);
+
+ list_add_tail(&ref->list, &chan->ramht_refs);
+ instmem->finish_access(dev);
+ return 0;
+ }
+ NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+ chan->id, co, nv_ro32(dev, ramht, co/4));
+
+ co += 8;
+ if (co >= dev_priv->ramht_size)
+ co = 0;
+ } while (co != ho);
+ instmem->finish_access(dev);
+
+ NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+ return -ENOMEM;
+}
+
+static void
+nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_channel *chan = ref->channel;
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ uint32_t co, ho;
+
+ if (!ramht) {
+ NV_ERROR(dev, "No hash table!\n");
+ return;
+ }
+
+ instmem->prepare_access(dev, true);
+ co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
+ do {
+ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+ (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
+ NV_DEBUG(dev,
+ "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, ref->handle,
+ nv_ro32(dev, ramht, (co + 4)));
+ nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
+ nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
+
+ list_del(&ref->list);
+ instmem->finish_access(dev);
+ return;
+ }
+
+ co += 8;
+ if (co >= dev_priv->ramht_size)
+ co = 0;
+ } while (co != ho);
+ list_del(&ref->list);
+ instmem->finish_access(dev);
+
+ NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+ chan->id, ref->handle);
+}
+
+int
+nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
+ uint32_t size, int align, uint32_t flags,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_gpuobj *gpuobj;
+ struct mem_block *pramin = NULL;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
+ chan ? chan->id : -1, size, align, flags);
+
+ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+ return -EINVAL;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+ gpuobj->flags = flags;
+ gpuobj->im_channel = chan;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ /* Choose between global instmem heap, and per-channel private
+ * instmem heap. On <NV50 allow requests for private instmem
+ * to be satisfied from global heap if no per-channel area
+ * available.
+ */
+ if (chan) {
+ if (chan->ramin_heap) {
+ NV_DEBUG(dev, "private heap\n");
+ pramin = chan->ramin_heap;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ NV_DEBUG(dev, "global heap fallback\n");
+ pramin = dev_priv->ramin_heap;
+ }
+ } else {
+ NV_DEBUG(dev, "global heap\n");
+ pramin = dev_priv->ramin_heap;
+ }
+
+ if (!pramin) {
+ NV_ERROR(dev, "No PRAMIN heap!\n");
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -EINVAL;
+ }
+
+ if (!chan) {
+ ret = engine->instmem.populate(dev, gpuobj, &size);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+ }
+
+ /* Allocate a chunk of the PRAMIN aperture */
+ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+ drm_order(align),
+ (struct drm_file *)-2, 0);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+
+ if (!chan) {
+ ret = engine->instmem.bind(dev, gpuobj);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ int i;
+
+ engine->instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ nv_wo32(dev, gpuobj, i/4, 0);
+ engine->instmem.finish_access(dev);
+ }
+
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
+int
+nouveau_gpuobj_early_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "\n");
+
+ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_gpuobj_new_fake(dev,
+ dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
+ &dev_priv->ramht, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "\n");
+
+ nouveau_gpuobj_del(dev, &dev_priv->ramht);
+}
+
+void
+nouveau_gpuobj_late_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ struct list_head *entry, *tmp;
+
+ NV_DEBUG(dev, "\n");
+
+ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
+ gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
+
+ NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
+ gpuobj, gpuobj->refcount);
+ gpuobj->refcount = 0;
+ nouveau_gpuobj_del(dev, &gpuobj);
+ }
+}
+
+int
+nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_gpuobj *gpuobj;
+ int i;
+
+ NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+ if (!dev_priv || !pgpuobj || !(*pgpuobj))
+ return -EINVAL;
+ gpuobj = *pgpuobj;
+
+ if (gpuobj->refcount != 0) {
+ NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
+ return -EINVAL;
+ }
+
+ if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+ engine->instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ nv_wo32(dev, gpuobj, i/4, 0);
+ engine->instmem.finish_access(dev);
+ }
+
+ if (gpuobj->dtor)
+ gpuobj->dtor(dev, gpuobj);
+
+ if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
+ engine->instmem.clear(dev, gpuobj);
+
+ if (gpuobj->im_pramin) {
+ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+ kfree(gpuobj->im_pramin);
+ else
+ nouveau_mem_free_block(gpuobj->im_pramin);
+ }
+
+ list_del(&gpuobj->list);
+
+ *pgpuobj = NULL;
+ kfree(gpuobj);
+ return 0;
+}
+
+static int
+nouveau_gpuobj_instance_get(struct drm_device *dev,
+ struct nouveau_channel *chan,
+ struct nouveau_gpuobj *gpuobj, uint32_t *inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *cpramin;
+
+ /* <NV50 use PRAMIN address everywhere */
+ if (dev_priv->card_type < NV_50) {
+ *inst = gpuobj->im_pramin->start;
+ return 0;
+ }
+
+ if (chan && gpuobj->im_channel != chan) {
+ NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
+ gpuobj->im_channel->id, chan->id);
+ return -EINVAL;
+ }
+
+ /* NV50 channel-local instance */
+ if (chan) {
+ cpramin = chan->ramin->gpuobj;
+ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+ return 0;
+ }
+
+ /* NV50 global (VRAM) instance */
+ if (!gpuobj->im_channel) {
+ /* ...from global heap */
+ if (!gpuobj->im_backing) {
+ NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
+ return -EINVAL;
+ }
+ *inst = gpuobj->im_backing_start;
+ return 0;
+ } else {
+ /* ...from local heap */
+ cpramin = gpuobj->im_channel->ramin->gpuobj;
+ *inst = cpramin->im_backing_start +
+ (gpuobj->im_pramin->start - cpramin->im_pramin->start);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
+ uint32_t handle, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_ref *ref;
+ uint32_t instance;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
+ chan ? chan->id : -1, handle, gpuobj);
+
+ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
+ return -EINVAL;
+
+ if (!chan && !ref_ret)
+ return -EINVAL;
+
+ if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
+ /* sw object */
+ instance = 0x40;
+ } else {
+ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
+ if (ret)
+ return ret;
+ }
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&ref->list);
+ ref->gpuobj = gpuobj;
+ ref->channel = chan;
+ ref->instance = instance;
+
+ if (!ref_ret) {
+ ref->handle = handle;
+
+ ret = nouveau_ramht_insert(dev, ref);
+ if (ret) {
+ kfree(ref);
+ return ret;
+ }
+ } else {
+ ref->handle = ~0;
+ *ref_ret = ref;
+ }
+
+ ref->gpuobj->refcount++;
+ return 0;
+}
+
+int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
+{
+ struct nouveau_gpuobj_ref *ref;
+
+ NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
+
+ if (!dev || !pref || *pref == NULL)
+ return -EINVAL;
+ ref = *pref;
+
+ if (ref->handle != ~0)
+ nouveau_ramht_remove(dev, ref);
+
+ if (ref->gpuobj) {
+ ref->gpuobj->refcount--;
+
+ if (ref->gpuobj->refcount == 0) {
+ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
+ nouveau_gpuobj_del(dev, &ref->gpuobj);
+ }
+ }
+
+ *pref = NULL;
+ kfree(ref);
+ return 0;
+}
+
+int
+nouveau_gpuobj_new_ref(struct drm_device *dev,
+ struct nouveau_channel *oc, struct nouveau_channel *rc,
+ uint32_t handle, uint32_t size, int align,
+ uint32_t flags, struct nouveau_gpuobj_ref **ref)
+{
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct nouveau_gpuobj_ref *ref;
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ if (ref->handle == handle) {
+ if (ref_ret)
+ *ref_ret = ref;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
+ uint32_t b_offset, uint32_t size,
+ uint32_t flags, struct nouveau_gpuobj **pgpuobj,
+ struct nouveau_gpuobj_ref **pref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int i;
+
+ NV_DEBUG(dev,
+ "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
+ p_offset, b_offset, size, flags);
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+ gpuobj->im_channel = NULL;
+ gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ if (p_offset != ~0) {
+ gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+ GFP_KERNEL);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_pramin->start = p_offset;
+ gpuobj->im_pramin->size = size;
+ }
+
+ if (b_offset != ~0) {
+ gpuobj->im_backing = (struct nouveau_bo *)-1;
+ gpuobj->im_backing_start = b_offset;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ nv_wo32(dev, gpuobj, i/4, 0);
+ dev_priv->engine.instmem.finish_access(dev);
+ }
+
+ if (pref) {
+ i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
+ if (i) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return i;
+ }
+ }
+
+ if (pgpuobj)
+ *pgpuobj = gpuobj;
+ return 0;
+}
+
+static uint32_t
+nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*XXX: dodgy hack for now */
+ if (dev_priv->card_type >= NV_50)
+ return 24;
+ if (dev_priv->card_type >= NV_40)
+ return 32;
+ return 16;
+}
+
+/*
+ DMA objects are used to reference a piece of memory in the
+ framebuffer, PCI or AGP address space. Each object is 16 bytes big
+ and looks as follows:
+
+ entry[0]
+ 11:0 class (seems like I can always use 0 here)
+ 12 page table present?
+ 13 page entry linear?
+ 15:14 access: 0 rw, 1 ro, 2 wo
+ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
+ 31:20 dma adjust (bits 0-11 of the address)
+ entry[1]
+ dma limit (size of transfer)
+ entry[X]
+ 1 0 readonly, 1 readwrite
+ 31:12 dma frame address of the page (bits 12-31 of the address)
+ entry[N]
+ page table terminator; nvidia writes the same value as the first
+ pte here, rivatv uses 0xffffffff
+
+ Non linear page tables need a list of frame addresses afterwards,
+ the rivatv project has some info on this.
+
+ The method below creates a DMA object in instance RAM and returns a handle
+ to it that can be used to set up context objects.
+*/
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
+ chan->id, class, offset, size);
+ NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+
+ switch (target) {
+ case NV_DMA_TARGET_AGP:
+ offset += dev_priv->gart_info.aper_base;
+ break;
+ default:
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan,
+ nouveau_gpuobj_class_instmem_size(dev, class),
+ 16, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ return ret;
+ }
+
+ instmem->prepare_access(dev, true);
+
+ if (dev_priv->card_type < NV_50) {
+ uint32_t frame, adjust, pte_flags = 0;
+
+ if (access != NV_DMA_ACCESS_RO)
+ pte_flags |= (1<<1);
+ adjust = offset & 0x00000fff;
+ frame = offset & ~0x00000fff;
+
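+ /* Build the object as described in the layout comment above:
+ * entry[0] is present | linear | adjust | access | target | class,
+ * entry[1] the limit, entry[2..3] the (single) page frame, with
+ * the duplicate frame word acting as the terminator.
+ */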
+ nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ class));
+ nv_wo32(dev, *gpuobj, 1, size - 1);
+ nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
+ nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+ } else {
+ uint64_t limit = offset + size - 1;
+ uint32_t flags0, flags5;
+
+ if (target == NV_DMA_TARGET_VIDMEM) {
+ flags0 = 0x00190000;
+ flags5 = 0x00010000;
+ } else {
+ flags0 = 0x7fc00000;
+ flags5 = 0x00080000;
+ }
+
+ nv_wo32(dev, *gpuobj, 0, flags0 | class);
+ nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
+ nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
+ nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
+ (upper_32_bits(offset) & 0xff));
+ nv_wo32(dev, *gpuobj, 5, flags5);
+ }
+
+ instmem->finish_access(dev);
+
+ (*gpuobj)->engine = NVOBJ_ENGINE_SW;
+ (*gpuobj)->class = class;
+ return 0;
+}
+
+int
+nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
+ uint64_t offset, uint64_t size, int access,
+ struct nouveau_gpuobj **gpuobj,
+ uint32_t *o_ret)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
+ (dev_priv->card_type >= NV_50 &&
+ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ offset + dev_priv->vm_gart_base,
+ size, access, NV_DMA_TARGET_AGP,
+ gpuobj);
+ if (o_ret)
+ *o_ret = 0;
+ } else
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+ *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ if (offset & ~0xffffffffULL) {
+ NV_ERROR(dev, "obj offset exceeds 32-bits\n");
+ return -EINVAL;
+ }
+ if (o_ret)
+ *o_ret = (uint32_t)offset;
+ ret = (*gpuobj != NULL) ? 0 : -EINVAL;
+ } else {
+ NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Context objects in the instance RAM have the following structure.
+ * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
+
+ NV4 - NV30:
+
+ entry[0]
+ 11:0 class
+ 12 chroma key enable
+ 13 user clip enable
+ 14 swizzle enable
+ 17:15 patch config:
+ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
+ 18 synchronize enable
+ 19 endian: 1 big, 0 little
+ 21:20 dither mode
+ 23 single step enable
+ 24 patch status: 0 invalid, 1 valid
+ 25 context_surface 0: 1 valid
+ 26 context surface 1: 1 valid
+ 27 context pattern: 1 valid
+ 28 context rop: 1 valid
+ 29,30 context beta, beta4
+ entry[1]
+ 7:0 mono format
+ 15:8 color format
+ 31:16 notify instance address
+ entry[2]
+ 15:0 dma 0 instance address
+ 31:16 dma 1 instance address
+ entry[3]
+ dma method traps
+
+ NV40:
+ No idea what the exact format is. Here's what can be deducted:
+
+ entry[0]:
+ 11:0 class (maybe uses more bits here?)
+ 17 user clip enable
+ 21:19 patch config
+ 25 patch status valid ?
+ entry[1]:
+ 15:0 DMA notifier (maybe 20:0)
+ entry[2]:
+ 15:0 DMA 0 instance (maybe 20:0)
+ 24 big endian
+ entry[3]:
+ 15:0 DMA 1 instance (maybe 20:0)
+ entry[4]:
+ entry[5]:
+ set to 0?
+*/
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+
+ ret = nouveau_gpuobj_new(dev, chan,
+ nouveau_gpuobj_class_instmem_size(dev, class),
+ 16,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (dev_priv->card_type >= NV_50) {
+ nv_wo32(dev, *gpuobj, 0, class);
+ nv_wo32(dev, *gpuobj, 5, 0x00010000);
+ } else {
+ switch (class) {
+ case NV_CLASS_NULL:
+ nv_wo32(dev, *gpuobj, 0, 0x00001030);
+ nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+ break;
+ default:
+ if (dev_priv->card_type >= NV_40) {
+ nv_wo32(dev, *gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+ nv_wo32(dev, *gpuobj, 2, 0x01000000);
+#endif
+ } else {
+#ifdef __BIG_ENDIAN
+ nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+#else
+ nv_wo32(dev, *gpuobj, 0, class);
+#endif
+ }
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ (*gpuobj)->engine = NVOBJ_ENGINE_GR;
+ (*gpuobj)->class = class;
+ return 0;
+}
+
+static int
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
+ return -EINVAL;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ gpuobj->engine = NVOBJ_ENGINE_SW;
+ gpuobj->class = class;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
+static int
+nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pramin = NULL;
+ uint32_t size;
+ uint32_t base;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ /* Base amount for object storage (4KiB enough?) */
+ size = 0x1000;
+ base = 0;
+
+ /* PGRAPH context */
+
+ if (dev_priv->card_type == NV_50) {
+ /* Various fixed table thingos */
+ size += 0x1400; /* mostly unknown stuff */
+ size += 0x4000; /* vm pd */
+ base = 0x6000;
+ /* RAMHT, not sure about setting size yet, 32KiB to be safe */
+ size += 0x8000;
+ /* RAMFC */
+ size += 0x1000;
+ /* PGRAPH context */
+ size += 0x70000;
+ }
+
+ NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+ chan->id, size, base);
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
+ &chan->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
+ return ret;
+ }
+ pramin = chan->ramin->gpuobj;
+
+ ret = nouveau_mem_init_heap(&chan->ramin_heap,
+ pramin->im_pramin->start + base, size);
+ if (ret) {
+ NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
+ uint32_t vram_h, uint32_t tt_h)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_gpuobj *vram = NULL, *tt = NULL;
+ int ret, i;
+
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
+ NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+
+ /* Reserve a block of PRAMIN for the channel
+ *XXX: maybe on <NV50 too at some point
+ */
+ if (0 || dev_priv->card_type == NV_50) {
+ ret = nouveau_gpuobj_channel_init_pramin(chan);
+ if (ret) {
+ NV_ERROR(dev, "init pramin\n");
+ return ret;
+ }
+ }
+
+ /* NV50 VM
+ * - Allocate per-channel page-directory
+ * - Map GART and VRAM into the channel's address space at the
+ * locations determined during init.
+ */
+ if (dev_priv->card_type >= NV_50) {
+ uint32_t vm_offset, pde;
+
+ instmem->prepare_access(dev, true);
+
+ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
+ vm_offset += chan->ramin->gpuobj->im_pramin->start;
+
+ ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
+ 0, &chan->vm_pd, NULL);
+ if (ret) {
+ instmem->finish_access(dev);
+ return ret;
+ }
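+ /* Fill the page directory with invalid entries: the low word and
+ * its present bit stay clear, the high word gets 0xdeadcafe
+ * poison. The GART and VRAM ranges are pointed at their page
+ * tables below.
+ */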
+ for (i = 0; i < 0x4000; i += 8) {
+ nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
+ nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+ }
+
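+ /* Each page directory entry spans 512MiB of address space and is
+ * two 32-bit words wide, hence the '* 2' when computing the index.
+ */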
+ pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->gart_info.sg_ctxdma,
+ &chan->vm_gart_pt);
+ if (ret) {
+ instmem->finish_access(dev);
+ return ret;
+ }
+ nv_wo32(dev, chan->vm_pd, pde++,
+ chan->vm_gart_pt->instance | 0x03);
+ nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+
+ pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->vm_vram_pt[i],
+ &chan->vm_vram_pt[i]);
+ if (ret) {
+ instmem->finish_access(dev);
+ return ret;
+ }
+
+ nv_wo32(dev, chan->vm_pd, pde++,
+ chan->vm_vram_pt[i]->instance | 0x61);
+ nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+ }
+
+ instmem->finish_access(dev);
+ }
+
+ /* RAMHT */
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
+ 0x8000, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ }
+
+ /* VRAM ctxdma */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->vm_end,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_AGP, &vram);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &vram);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ /* TT memory ctxdma */
+ if (dev_priv->card_type >= NV_50) {
+ tt = vram;
+ } else
+ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &tt, NULL);
+ } else {
+ NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_device *dev = chan->dev;
+ struct list_head *entry, *tmp;
+ struct nouveau_gpuobj_ref *ref;
+ int i;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ if (!chan->ramht_refs.next)
+ return;
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ nouveau_gpuobj_ref_del(dev, &ref);
+ }
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramht);
+
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
+ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+
+ if (chan->ramin_heap)
+ nouveau_mem_takedown(&chan->ramin_heap);
+ if (chan->ramin)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+}
+
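+/* VRAM contents are not preserved across suspend. On <NV50 the reserved
+ * PRAMIN block is snapshotted wholesale; on NV50 each VRAM-backed object
+ * is copied out here and written back by nouveau_gpuobj_resume().
+ */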
+int
+nouveau_gpuobj_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int i;
+
+ if (dev_priv->card_type < NV_50) {
+ dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
+ if (!dev_priv->susres.ramin_copy)
+ return -ENOMEM;
+
+ for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
+ dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
+ return 0;
+ }
+
+ list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+ if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
+ continue;
+
+ gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
+ if (!gpuobj->im_backing_suspend) {
+ nouveau_gpuobj_resume(dev);
+ return -ENOMEM;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+ for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
+ gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+ dev_priv->engine.instmem.finish_access(dev);
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ if (dev_priv->card_type < NV_50) {
+ vfree(dev_priv->susres.ramin_copy);
+ dev_priv->susres.ramin_copy = NULL;
+ return;
+ }
+
+ list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+ if (!gpuobj->im_backing_suspend)
+ continue;
+
+ vfree(gpuobj->im_backing_suspend);
+ gpuobj->im_backing_suspend = NULL;
+ }
+}
+
+void
+nouveau_gpuobj_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int i;
+
+ if (dev_priv->card_type < NV_50) {
+ for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
+ nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+ nouveau_gpuobj_suspend_cleanup(dev);
+ return;
+ }
+
+ list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+ if (!gpuobj->im_backing_suspend)
+ continue;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
+ nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+ dev_priv->engine.instmem.finish_access(dev);
+ }
+
+ nouveau_gpuobj_suspend_cleanup(dev);
+}
+
+int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_grobj_alloc *init = data;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_pgraph_object_class *grc;
+ struct nouveau_gpuobj *gr = NULL;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
+
+ if (init->handle == ~0)
+ return -EINVAL;
+
+ grc = pgraph->grclass;
+ while (grc->id) {
+ if (grc->id == init->class)
+ break;
+ grc++;
+ }
+
+ if (!grc->id) {
+ NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
+ return -EPERM;
+ }
+
+ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+ return -EEXIST;
+
+ if (!grc->software)
+ ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
+ else
+ ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+
+ if (ret) {
+ NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ nouveau_gpuobj_del(dev, &gr);
+ return ret;
+ }
+
+ return 0;
+}
+
+int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gpuobj_free *objfree = data;
+ struct nouveau_gpuobj_ref *ref;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+
+ ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
+ if (ret)
+ return ret;
+ nouveau_gpuobj_ref_del(dev, &ref);
+
+ return 0;
+}
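
A sketch of the request the allocation ioctl above consumes; the field names
mirror the init->channel/init->handle/init->class accesses, and the concrete
values are hypothetical (a handle of ~0 is rejected, an already-used handle
returns -EEXIST, and the class must appear in the chipset's grclass table):

	struct drm_nouveau_grobj_alloc req = {
		.channel = chan_id,	/* channel to own the object */
		.handle  = 0xbeef0001,	/* any unused handle but ~0 */
		.class   = 0x5039,	/* illustrative class id */
	};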
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 00000000000..fa1b0e7165b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,836 @@
+
+#define NV03_BOOT_0 0x00100000
+# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
+# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
+# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
+# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
+# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
+# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
+# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
+# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
+# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
+
+#define NV04_FIFO_DATA 0x0010020c
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+
+#define NV_RAMIN 0x00700000
+
+#define NV_RAMHT_HANDLE_OFFSET 0
+#define NV_RAMHT_CONTEXT_OFFSET 4
+# define NV_RAMHT_CONTEXT_VALID (1<<31)
+# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
+# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
+# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
+# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
+# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
+# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
+# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
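
A sketch of how these fields compose: the context word stored next to a
handle in RAMHT packs the object's instance address, owning engine and
channel into a single 32-bit value ('inst' and 'chid' are hypothetical
inputs, pre-NV40 layout):

	static inline uint32_t
	example_nv04_ramht_ctx(uint32_t inst, int chid)
	{
		return NV_RAMHT_CONTEXT_VALID |
		       (chid << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		       (NV_RAMHT_CONTEXT_ENGINE_GRAPHICS <<
			NV_RAMHT_CONTEXT_ENGINE_SHIFT) |
		       (inst << NV_RAMHT_CONTEXT_INSTANCE_SHIFT);
	}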
+
+/* DMA object defines */
+#define NV_DMA_ACCESS_RW 0
+#define NV_DMA_ACCESS_RO 1
+#define NV_DMA_ACCESS_WO 2
+#define NV_DMA_TARGET_VIDMEM 0
+#define NV_DMA_TARGET_PCI 2
+#define NV_DMA_TARGET_AGP 3
+/* The following is not a real value used by the card; it is changed by
+ * nouveau_object_dma_create. */
+#define NV_DMA_TARGET_PCI_NONLINEAR 8
+
+/* Some object classes we care about in the drm */
+#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
+#define NV_CLASS_DMA_TO_MEMORY 0x00000003
+#define NV_CLASS_NULL 0x00000030
+#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
+
+#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
+#define NV03_USER__SIZE 16
+#define NV10_USER__SIZE 32
+#define NV03_USER_SIZE 0x00010000
+#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_PUT__SIZE 16
+#define NV10_USER_DMA_PUT__SIZE 32
+#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_GET__SIZE 16
+#define NV10_USER_DMA_GET__SIZE 32
+#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE))
+#define NV03_USER_REF_CNT__SIZE 16
+#define NV10_USER_REF_CNT__SIZE 32
+
+#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE))
+#define NV40_USER_SIZE 0x00001000
+#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_PUT__SIZE 32
+#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_GET__SIZE 32
+#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE))
+#define NV40_USER_REF_CNT__SIZE 32
+
+#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE))
+#define NV50_USER_SIZE 0x00002000
+#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_PUT__SIZE 128
+#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_GET__SIZE 128
+#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE))
+#define NV50_USER_REF_CNT__SIZE 128
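
The USER(i) blocks are per-channel register windows. A sketch of a
submission-side put-pointer update on an NV40-class chip, assuming 'chid'
is below NV40_USER_DMA_PUT__SIZE:

	nv_wr32(dev, NV40_USER_DMA_PUT(chid), new_put);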
+
+#define NV03_FIFO_SIZE 0x8000UL
+
+#define NV03_PMC_BOOT_0 0x00000000
+#define NV03_PMC_BOOT_1 0x00000004
+#define NV03_PMC_INTR_0 0x00000100
+# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8)
+# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
+# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
+# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
+# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
+# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
+# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
+#define NV03_PMC_INTR_EN_0 0x00000140
+# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0)
+#define NV03_PMC_ENABLE 0x00000200
+# define NV_PMC_ENABLE_PFIFO (1<<8)
+# define NV_PMC_ENABLE_PGRAPH (1<<12)
+/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
+ * the card will hang early on in the X init process.
+ */
+# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_BACKLIGHT 0x000015f0
+# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
+#define NV40_PMC_1700 0x00001700
+#define NV40_PMC_1704 0x00001704
+#define NV40_PMC_1708 0x00001708
+#define NV40_PMC_170C 0x0000170C
+
+/* probably PMC ? */
+#define NV50_PUNK_BAR0_PRAMIN 0x00001700
+#define NV50_PUNK_BAR_CFG_BASE 0x00001704
+#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
+#define NV50_PUNK_BAR1_CTXDMA 0x00001708
+#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
+#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_UNK1710 0x00001710
+
+#define NV04_PBUS_PCI_NV_1 0x00001804
+#define NV04_PBUS_PCI_NV_19 0x0000184C
+#define NV04_PBUS_PCI_NV_20 0x00001850
+# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
+# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
+
+#define NV04_PTIMER_INTR_0 0x00009100
+#define NV04_PTIMER_INTR_EN_0 0x00009140
+#define NV04_PTIMER_NUMERATOR 0x00009200
+#define NV04_PTIMER_DENOMINATOR 0x00009210
+#define NV04_PTIMER_TIME_0 0x00009400
+#define NV04_PTIMER_TIME_1 0x00009410
+#define NV04_PTIMER_ALARM_0 0x00009420
+
+#define NV04_PFB_CFG0 0x00100200
+#define NV04_PFB_CFG1 0x00100204
+#define NV40_PFB_020C 0x0010020C
+#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE 8
+#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
+#define NV10_PFB_CLOSE_PAGE2 0x0010033C
+#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0 12
+#define NV40_PFB_TILE__SIZE_1 15
+#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
+#define NV40_PFB_UNK_800 0x00100800
+
+#define NV04_PGRAPH_DEBUG_0 0x00400080
+#define NV04_PGRAPH_DEBUG_1 0x00400084
+#define NV04_PGRAPH_DEBUG_2 0x00400088
+#define NV04_PGRAPH_DEBUG_3 0x0040008c
+#define NV10_PGRAPH_DEBUG_4 0x00400090
+#define NV03_PGRAPH_INTR 0x00400100
+#define NV03_PGRAPH_NSTATUS 0x00400104
+# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
+# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
+# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
+# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
+# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
+# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
+# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
+# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
+#define NV03_PGRAPH_NSOURCE 0x00400108
+# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
+# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
+# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
+# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
+# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
+# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
+# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
+# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
+# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
+# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
+# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
+# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
+# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
+# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
+# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
+# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
+# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
+#define NV03_PGRAPH_INTR_EN 0x00400140
+#define NV40_PGRAPH_INTR_EN 0x0040013C
+# define NV_PGRAPH_INTR_NOTIFY (1<<0)
+# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
+# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
+# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
+# define NV_PGRAPH_INTR_ERROR (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL 0x00400144
+#define NV10_PGRAPH_CTX_USER 0x00400148
+#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
+#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
+#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
+#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
+#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
+#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
+#define NV10_PGRAPH_CTX_CACHE1 0x00400160
+#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL 0x00400170
+#define NV04_PGRAPH_CTX_USER 0x00400174
+#define NV04_PGRAPH_CTX_CACHE1 0x00400180
+#define NV10_PGRAPH_CTX_CACHE2 0x00400180
+#define NV03_PGRAPH_CTX_CONTROL 0x00400190
+#define NV03_PGRAPH_CTX_USER 0x00400194
+#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
+#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
+#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
+#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304 0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310 0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
+#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM 0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
+#define NV03_PGRAPH_X_MISC 0x00400500
+#define NV03_PGRAPH_Y_MISC 0x00400504
+#define NV04_PGRAPH_VALID1 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
+#define NV04_PGRAPH_MISC24_0 0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
+#define NV03_PGRAPH_CLIPX_0 0x00400524
+#define NV03_PGRAPH_CLIPX_1 0x00400528
+#define NV03_PGRAPH_CLIPY_0 0x0040052C
+#define NV03_PGRAPH_CLIPY_1 0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
+#define NV04_PGRAPH_MISC24_1 0x00400570
+#define NV04_PGRAPH_MISC24_2 0x00400574
+#define NV04_PGRAPH_VALID2 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1 0x00400580
+#define NV04_PGRAPH_PASSTHRU_2 0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
+#define NV04_PGRAPH_FORMAT_0 0x004005A8
+#define NV04_PGRAPH_FORMAT_1 0x004005AC
+#define NV04_PGRAPH_FILTER_0 0x004005B0
+#define NV04_PGRAPH_FILTER_1 0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0 0x00400600
+#define NV04_PGRAPH_ROP3 0x00400604
+#define NV04_PGRAPH_BETA_AND 0x00400608
+#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
+#define NV04_PGRAPH_FORMATS 0x00400618
+#define NV10_PGRAPH_DEBUG_2 0x00400620
+#define NV04_PGRAPH_BOFFSET0 0x00400640
+#define NV04_PGRAPH_BOFFSET1 0x00400644
+#define NV04_PGRAPH_BOFFSET2 0x00400648
+#define NV04_PGRAPH_BOFFSET3 0x0040064C
+#define NV04_PGRAPH_BOFFSET4 0x00400650
+#define NV04_PGRAPH_BOFFSET5 0x00400654
+#define NV04_PGRAPH_BBASE0 0x00400658
+#define NV04_PGRAPH_BBASE1 0x0040065C
+#define NV04_PGRAPH_BBASE2 0x00400660
+#define NV04_PGRAPH_BBASE3 0x00400664
+#define NV04_PGRAPH_BBASE4 0x00400668
+#define NV04_PGRAPH_BBASE5 0x0040066C
+#define NV04_PGRAPH_BPITCH0 0x00400670
+#define NV04_PGRAPH_BPITCH1 0x00400674
+#define NV04_PGRAPH_BPITCH2 0x00400678
+#define NV04_PGRAPH_BPITCH3 0x0040067C
+#define NV04_PGRAPH_BPITCH4 0x00400680
+#define NV04_PGRAPH_BLIMIT0 0x00400684
+#define NV04_PGRAPH_BLIMIT1 0x00400688
+#define NV04_PGRAPH_BLIMIT2 0x0040068C
+#define NV04_PGRAPH_BLIMIT3 0x00400690
+#define NV04_PGRAPH_BLIMIT4 0x00400694
+#define NV04_PGRAPH_BLIMIT5 0x00400698
+#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
+#define NV03_PGRAPH_STATUS 0x004006B0
+#define NV04_PGRAPH_STATUS 0x00400700
+#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
+#define NV04_PGRAPH_SURFACE 0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
+#define NV04_PGRAPH_STATE 0x00400710
+#define NV10_PGRAPH_SURFACE 0x00400710
+#define NV04_PGRAPH_NOTIFY 0x00400714
+#define NV10_PGRAPH_STATE 0x00400714
+#define NV10_PGRAPH_NOTIFY 0x00400718
+
+#define NV04_PGRAPH_FIFO 0x00400720
+
+#define NV04_PGRAPH_BPIXEL 0x00400724
+#define NV10_PGRAPH_RDI_INDEX 0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
+#define NV10_PGRAPH_RDI_DATA 0x00400754
+#define NV04_PGRAPH_DMA_PITCH 0x00400760
+#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
+#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
+#define NV10_PGRAPH_DMA_PITCH 0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
+#define NV04_PGRAPH_PATT_COLOR0 0x00400800
+#define NV04_PGRAPH_PATT_COLOR1 0x00400804
+#define NV04_PGRAPH_PATTERN 0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
+#define NV04_PGRAPH_CHROMA 0x00400814
+#define NV04_PGRAPH_CONTROL0 0x00400818
+#define NV04_PGRAPH_CONTROL1 0x0040081C
+#define NV04_PGRAPH_CONTROL2 0x00400820
+#define NV04_PGRAPH_BLEND 0x00400824
+#define NV04_PGRAPH_STORED_FMT 0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
+#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
+#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
+#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
+#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
+#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM 0x00400D00
+#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM 0x00400D40
+#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
+#define NV10_PGRAPH_XFMODE0 0x00400F40
+#define NV10_PGRAPH_XFMODE1 0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
+#define NV10_PGRAPH_PIPE_DATA 0x00400F54
+#define NV04_PGRAPH_DMA_START_0 0x00401000
+#define NV04_PGRAPH_DMA_START_1 0x00401004
+#define NV04_PGRAPH_DMA_LENGTH 0x00401008
+#define NV04_PGRAPH_DMA_MISC 0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0 0x00401020
+#define NV04_PGRAPH_DMA_DATA_1 0x00401024
+#define NV04_PGRAPH_DMA_RM 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+
+/* It's a guess that this works on NV03. Confirmed on NV04, though */
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
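
A sketch composing a CACHE1 DMA fetch configuration from the fields above;
the particular trigger, size and request-count values are illustrative only:

	uint32_t dma_fetch = NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8;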
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+
+#define NV_CRTC0_INTSTAT 0x00600100
+#define NV_CRTC0_INTEN 0x00600140
+#define NV_CRTC1_INTSTAT 0x00602100
+#define NV_CRTC1_INTEN 0x00602140
+# define NV_CRTC_INTR_VBLANK (1<<0)
+
+#define NV04_PRAMIN 0x00700000
+
+/* FIFO commands. These are not registers, nor register masks */
+#define NV03_FIFO_CMD_JUMP 0x20000000
+#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
+#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
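
A sketch of building one of these commands: a JUMP token is the command bit
OR'd with a 4-byte-aligned target offset ('offset' is caller-supplied),
exactly as NV03_FIFO_CMD_REWIND shows for offset 0:

	uint32_t jump = NV03_FIFO_CMD_JUMP |
			(offset & NV03_FIFO_CMD_JUMP_OFFSET_MASK);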
+
+/* This is a partial import from rules-ng, a few things may be duplicated.
+ * Eventually we should completely import everything from rules-ng.
+ * For the moment check rules-ng for docs.
+ */
+
+#define NV50_PMC 0x00000000
+#define NV50_PMC__LEN 0x1
+#define NV50_PMC__ESIZE 0x2000
+# define NV50_PMC_BOOT_0 0x00000000
+# define NV50_PMC_BOOT_0_REVISION 0x000000ff
+# define NV50_PMC_BOOT_0_REVISION__SHIFT 0
+# define NV50_PMC_BOOT_0_ARCH 0x0ff00000
+# define NV50_PMC_BOOT_0_ARCH__SHIFT 20
+# define NV50_PMC_INTR_0 0x00000100
+# define NV50_PMC_INTR_0_PFIFO (1<<8)
+# define NV50_PMC_INTR_0_PGRAPH (1<<12)
+# define NV50_PMC_INTR_0_PTIMER (1<<20)
+# define NV50_PMC_INTR_0_HOTPLUG (1<<21)
+# define NV50_PMC_INTR_0_DISPLAY (1<<26)
+# define NV50_PMC_INTR_EN_0 0x00000140
+# define NV50_PMC_INTR_EN_0_MASTER (1<<0)
+# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0)
+# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0)
+# define NV50_PMC_ENABLE 0x00000200
+# define NV50_PMC_ENABLE_PFIFO (1<<8)
+# define NV50_PMC_ENABLE_PGRAPH (1<<12)
+
+#define NV50_PCONNECTOR 0x0000e000
+#define NV50_PCONNECTOR__LEN 0x1
+#define NV50_PCONNECTOR__ESIZE 0x1000
+# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19)
+# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
+# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138
+# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150
+# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168
+# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180
+# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
+# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
+
+#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
+#define NV50_AUXCH_DATA_OUT__SIZE 4
+#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
+#define NV50_AUXCH_DATA_IN__SIZE 4
+#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
+#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
+#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000
+#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000
+#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000
+#define NV50_AUXCH_CTRL_LINKEN 0x00100000
+#define NV50_AUXCH_CTRL_LINKEN_DISABLED 0x00000000
+#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000
+#define NV50_AUXCH_CTRL_EXEC 0x00010000
+#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000
+#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000
+#define NV50_AUXCH_CTRL_CMD 0x0000f000
+#define NV50_AUXCH_CTRL_CMD_SHIFT 12
+#define NV50_AUXCH_CTRL_LEN 0x0000000f
+#define NV50_AUXCH_CTRL_LEN_SHIFT 0
+#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8)
+#define NV50_AUXCH_STAT_STATE 0x10000000
+#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000
+#define NV50_AUXCH_STAT_STATE_READY 0x10000000
+#define NV50_AUXCH_STAT_REPLY 0x000f0000
+#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000
+#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000
+#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000
+#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000
+#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000
+#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000
+#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000
+#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000
+#define NV50_AUXCH_STAT_COUNT 0x0000001f
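
A sketch of decoding a completed AUX transaction with these masks, where
'index' selects the channel as in the parameterised defines above:

	uint32_t stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
	uint32_t len = 0;

	if ((stat & NV50_AUXCH_STAT_REPLY_AUX) == NV50_AUXCH_STAT_REPLY_AUX_ACK)
		len = stat & NV50_AUXCH_STAT_COUNT;	/* transfer length */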
+
+#define NV50_PBUS 0x00088000
+#define NV50_PBUS__LEN 0x1
+#define NV50_PBUS__ESIZE 0x1000
+# define NV50_PBUS_PCI_ID 0x00088000
+# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff
+# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0
+# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000
+# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16
+
+#define NV50_PFB 0x00100000
+#define NV50_PFB__LEN 0x1
+#define NV50_PFB__ESIZE 0x1000
+
+#define NV50_PEXTDEV 0x00101000
+#define NV50_PEXTDEV__LEN 0x1
+#define NV50_PEXTDEV__ESIZE 0x1000
+
+#define NV50_PROM 0x00300000
+#define NV50_PROM__LEN 0x1
+#define NV50_PROM__ESIZE 0x10000
+
+#define NV50_PGRAPH 0x00400000
+#define NV50_PGRAPH__LEN 0x1
+#define NV50_PGRAPH__ESIZE 0x10000
+
+#define NV50_PDISPLAY 0x00610000
+#define NV50_PDISPLAY_OBJECTS 0x00610010
+#define NV50_PDISPLAY_INTR_0 0x00610020
+#define NV50_PDISPLAY_INTR_1 0x00610024
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
+#define NV50_PDISPLAY_INTR_EN 0x0061002c
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
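
A sketch enabling the vblank interrupt for CRTC 'head' with the
parameterised define, as a read-modify-write of the enable register:

	nv_wr32(dev, NV50_PDISPLAY_INTR_EN,
		nv_rd32(dev, NV50_PDISPLAY_INTR_EN) |
		NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(head));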
+#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
+#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
+#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
+#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
+#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
+#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
+
+#define NV50_PDISPLAY_CURSOR 0x00610270
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
+
+#define NV50_PDISPLAY_CTRL_STATE 0x00610300
+#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
+#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
+#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
+#define NV50_PDISPLAY_CTRL_VAL 0x00610304
+#define NV50_PDISPLAY_UNK_380 0x00610380
+#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
+#define NV50_PDISPLAY_UNK_388 0x00610388
+#define NV50_PDISPLAY_UNK_38C 0x0061038c
+
+#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18
+#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24
+#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48
+#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50
+#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58
+#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78
+#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8
+#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8
+#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0
+#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0
+#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8
+#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0
+#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8
+#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00
+#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08
+#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10
+#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18
+#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20
+#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000
+#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28
+#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38
+#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40
+#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48
+#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50
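
A sketch of the _P/_C accessors: the macros token-paste a register suffix
onto NV50_PDISPLAY_CRTC_ and add the 0x540 per-head stride, with the C
variant 4 bytes after the P variant. Reading head 1's clock register:

	uint32_t clk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(1, CLOCK));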
+
+#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
+
+#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
+#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
+
+#define NV50_PDISPLAY_CRTC_CLK 0x00614000
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104)
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200)
+
+#define NV50_PDISPLAY_DAC_CLK 0x00614000
+#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280)
+
+#define NV50_PDISPLAY_SOR_CLK 0x00614000
+#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300)
+
+#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400)
+
+#define NV50_PDISPLAY_DAC 0x0061a000
+#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000
+#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600
+
+#define NV50_PDISPLAY_SOR 0x0061c000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001
+#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600
+#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
+#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
+#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
+#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
+#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
+#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000
+#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000
+#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
+#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
+
+#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004)
+
+#define NV50_PDISPLAY_CURSOR_USER 0x00647000
+#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080)
+#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 00000000000..4c7f1e403e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,321 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <linux/pagemap.h>
+
+#define NV_CTXDMA_PAGE_SHIFT 12
+#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
+#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
+
+struct nouveau_sgdma_be {
+ struct ttm_backend backend;
+ struct drm_device *dev;
+
+ dma_addr_t *pages;
+ unsigned nr_pages;
+
+ unsigned pte_start;
+ bool bound;
+};
+
+static int
+nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
+ struct page **pages, struct page *dummy_read_page)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+
+	NV_DEBUG(dev, "num_pages = %lu\n", num_pages);
+
+ if (nvbe->pages)
+ return -EINVAL;
+
+ nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
+ if (!nvbe->pages)
+ return -ENOMEM;
+
+ nvbe->nr_pages = 0;
+ while (num_pages--) {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
+
+ nvbe->nr_pages++;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sgdma_clear(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+
+	NV_DEBUG(dev, "\n");
+
+	if (nvbe->pages) {
+ if (nvbe->bound)
+ be->func->unbind(be);
+
+ while (nvbe->nr_pages--) {
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ kfree(nvbe->pages);
+ nvbe->pages = NULL;
+ nvbe->nr_pages = 0;
+ }
+}
+
+static inline unsigned
+nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+
+ if (dev_priv->card_type < NV_50)
+ return pte + 2;
+
+ return pte << 1;
+}
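
A worked example of the indexing above: GART offset 0x3000 falls in page 3.
Pre-NV50 PTEs are one 32-bit word each and sit behind the two-word ctxdma
header written by nouveau_sgdma_init(), while NV50 PTEs are two words each:

	unsigned page = 0x3000 >> NV_CTXDMA_PAGE_SHIFT;	/* page 3 */
	unsigned pte_nv04 = page + 2;	/* word 5, past the ctxdma header */
	unsigned pte_nv50 = page << 1;	/* word 6, two words per PTE */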
+
+static int
+nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ unsigned i, j, pte;
+
+ NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+
+ dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
+ pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+ nvbe->pte_start = pte;
+ for (i = 0; i < nvbe->nr_pages; i++) {
+ dma_addr_t dma_offset = nvbe->pages[i];
+ uint32_t offset_l = lower_32_bits(dma_offset);
+ uint32_t offset_h = upper_32_bits(dma_offset);
+
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
+ if (dev_priv->card_type < NV_50)
+ nv_wo32(dev, gpuobj, pte++, offset_l | 3);
+ else {
+ nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
+ nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+ }
+
+ dma_offset += NV_CTXDMA_PAGE_SIZE;
+ }
+ }
+ dev_priv->engine.instmem.finish_access(nvbe->dev);
+
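+	/* The 0x100c80 writes below appear to trigger a VM/TLB flush on
+	 * NV50; each write is followed by a wait for bit 0 to clear. */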
+ if (dev_priv->card_type == NV_50) {
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+ nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+ nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+ }
+
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nouveau_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ unsigned i, j, pte;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!nvbe->bound)
+ return 0;
+
+ dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
+ pte = nvbe->pte_start;
+ for (i = 0; i < nvbe->nr_pages; i++) {
+ dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
+
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
+ if (dev_priv->card_type < NV_50)
+ nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
+ else {
+ nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
+ nv_wo32(dev, gpuobj, pte++, 0x00000000);
+ }
+
+ dma_offset += NV_CTXDMA_PAGE_SIZE;
+ }
+ }
+ dev_priv->engine.instmem.finish_access(nvbe->dev);
+
+ nvbe->bound = false;
+ return 0;
+}
+
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+	if (!nvbe)
+		return;
+
+	NV_DEBUG(nvbe->dev, "\n");
+
+	if (nvbe->pages)
+		be->func->clear(be);
+	kfree(nvbe);
+}
+
+static struct ttm_backend_func nouveau_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nouveau_sgdma_bind,
+ .unbind = nouveau_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+struct ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_sgdma_be *nvbe;
+
+ if (!dev_priv->gart_info.sg_ctxdma)
+ return NULL;
+
+ nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
+ if (!nvbe)
+ return NULL;
+
+ nvbe->dev = dev;
+
+ nvbe->backend.func = &nouveau_sgdma_backend;
+
+ return &nvbe->backend;
+}
+
+int
+nouveau_sgdma_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ uint32_t aper_size, obj_size;
+ int i, ret;
+
+ if (dev_priv->card_type < NV_50) {
+ aper_size = (64 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
+ obj_size += 8; /* ctxdma header */
+ } else {
+ /* 1 entire VM page table */
+ aper_size = (512 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ALLOW_NO_REFS |
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->gart_info.sg_dummy_page =
+ alloc_page(GFP_KERNEL|__GFP_DMA32);
+ set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
+ dev_priv->gart_info.sg_dummy_bus =
+ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (dev_priv->card_type < NV_50) {
+		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA does this, and
+		 * it is confirmed to work on C51. Perhaps that means
+		 * NV_DMA_TARGET_PCIE on those cards? */
+ nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ nv_wo32(dev, gpuobj, 1, aper_size - 1);
+ for (i = 2; i < 2 + (aper_size >> 12); i++) {
+ nv_wo32(dev, gpuobj, i,
+ dev_priv->gart_info.sg_dummy_bus | 3);
+ }
+ } else {
+ for (i = 0; i < obj_size; i += 8) {
+ nv_wo32(dev, gpuobj, (i+0)/4,
+ dev_priv->gart_info.sg_dummy_bus | 0x21);
+ nv_wo32(dev, gpuobj, (i+4)/4, 0);
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ return 0;
+}
+
+void
+nouveau_sgdma_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gart_info.sg_dummy_page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
+ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ unlock_page(dev_priv->gart_info.sg_dummy_page);
+ __free_page(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_page = NULL;
+ dev_priv->gart_info.sg_dummy_bus = 0;
+ }
+
+ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+}
+
+int
+nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ int pte;
+
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ if (dev_priv->card_type < NV_50) {
+ instmem->prepare_access(dev, false);
+ *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ instmem->finish_access(dev);
+ return 0;
+ }
+
+ NV_ERROR(dev, "Unimplemented on NV50\n");
+ return -EINVAL;
+}
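
A sketch of the lookup this provides on pre-NV50 chips: given a GART offset,
read the PTE back out of the ctxdma to recover the bus address of the
backing page ('use_page' is a hypothetical consumer):

	uint32_t bus_addr;
	if (nouveau_sgdma_get_page(dev, offset, &bus_addr) == 0)
		use_page(bus_addr);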
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 00000000000..2ed41d339f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright 2005 Stephane Marchesin
+ * Copyright 2008 Stuart Bennett
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/swab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include <linux/vgaarb.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nv50_display.h"
+
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
+static void nouveau_stub_takedown(struct drm_device *dev) {}
+
+static int nouveau_init_engine_ptrs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv04_fb_init;
+ engine->fb.takedown = nv04_fb_takedown;
+ engine->graph.grclass = nv04_graph_grclass;
+ engine->graph.init = nv04_graph_init;
+ engine->graph.takedown = nv04_graph_takedown;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.channel = nv04_graph_channel;
+ engine->graph.create_context = nv04_graph_create_context;
+ engine->graph.destroy_context = nv04_graph_destroy_context;
+ engine->graph.load_context = nv04_graph_load_context;
+ engine->graph.unload_context = nv04_graph_unload_context;
+ engine->fifo.channels = 16;
+ engine->fifo.init = nv04_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv04_fifo_channel_id;
+ engine->fifo.create_context = nv04_fifo_create_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
+ engine->fifo.load_context = nv04_fifo_load_context;
+ engine->fifo.unload_context = nv04_fifo_unload_context;
+ break;
+ case 0x10:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.grclass = nv10_graph_grclass;
+ engine->graph.init = nv10_graph_init;
+ engine->graph.takedown = nv10_graph_takedown;
+ engine->graph.channel = nv10_graph_channel;
+ engine->graph.create_context = nv10_graph_create_context;
+ engine->graph.destroy_context = nv10_graph_destroy_context;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.load_context = nv10_graph_load_context;
+ engine->graph.unload_context = nv10_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv10_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.unload_context = nv10_fifo_unload_context;
+ break;
+ case 0x20:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.grclass = nv20_graph_grclass;
+ engine->graph.init = nv20_graph_init;
+ engine->graph.takedown = nv20_graph_takedown;
+ engine->graph.channel = nv10_graph_channel;
+ engine->graph.create_context = nv20_graph_create_context;
+ engine->graph.destroy_context = nv20_graph_destroy_context;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.load_context = nv20_graph_load_context;
+ engine->graph.unload_context = nv20_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv10_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.unload_context = nv10_fifo_unload_context;
+ break;
+ case 0x30:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.grclass = nv30_graph_grclass;
+ engine->graph.init = nv30_graph_init;
+ engine->graph.takedown = nv20_graph_takedown;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.channel = nv10_graph_channel;
+ engine->graph.create_context = nv20_graph_create_context;
+ engine->graph.destroy_context = nv20_graph_destroy_context;
+ engine->graph.load_context = nv20_graph_load_context;
+ engine->graph.unload_context = nv20_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv10_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.unload_context = nv10_fifo_unload_context;
+ break;
+ case 0x40:
+ case 0x60:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv40_mc_init;
+ engine->mc.takedown = nv40_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv40_fb_init;
+ engine->fb.takedown = nv40_fb_takedown;
+ engine->graph.grclass = nv40_graph_grclass;
+ engine->graph.init = nv40_graph_init;
+ engine->graph.takedown = nv40_graph_takedown;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.channel = nv40_graph_channel;
+ engine->graph.create_context = nv40_graph_create_context;
+ engine->graph.destroy_context = nv40_graph_destroy_context;
+ engine->graph.load_context = nv40_graph_load_context;
+ engine->graph.unload_context = nv40_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv40_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv40_fifo_create_context;
+ engine->fifo.destroy_context = nv40_fifo_destroy_context;
+ engine->fifo.load_context = nv40_fifo_load_context;
+ engine->fifo.unload_context = nv40_fifo_unload_context;
+ break;
+ case 0x50:
+ case 0x80: /* gotta love NVIDIA's consistency.. */
+ case 0x90:
+ case 0xA0:
+ engine->instmem.init = nv50_instmem_init;
+ engine->instmem.takedown = nv50_instmem_takedown;
+ engine->instmem.suspend = nv50_instmem_suspend;
+ engine->instmem.resume = nv50_instmem_resume;
+ engine->instmem.populate = nv50_instmem_populate;
+ engine->instmem.clear = nv50_instmem_clear;
+ engine->instmem.bind = nv50_instmem_bind;
+ engine->instmem.unbind = nv50_instmem_unbind;
+ engine->instmem.prepare_access = nv50_instmem_prepare_access;
+ engine->instmem.finish_access = nv50_instmem_finish_access;
+ engine->mc.init = nv50_mc_init;
+ engine->mc.takedown = nv50_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nouveau_stub_init;
+ engine->fb.takedown = nouveau_stub_takedown;
+ engine->graph.grclass = nv50_graph_grclass;
+ engine->graph.init = nv50_graph_init;
+ engine->graph.takedown = nv50_graph_takedown;
+ engine->graph.fifo_access = nv50_graph_fifo_access;
+ engine->graph.channel = nv50_graph_channel;
+ engine->graph.create_context = nv50_graph_create_context;
+ engine->graph.destroy_context = nv50_graph_destroy_context;
+ engine->graph.load_context = nv50_graph_load_context;
+ engine->graph.unload_context = nv50_graph_unload_context;
+ engine->fifo.channels = 128;
+ engine->fifo.init = nv50_fifo_init;
+ engine->fifo.takedown = nv50_fifo_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv50_fifo_channel_id;
+ engine->fifo.create_context = nv50_fifo_create_context;
+ engine->fifo.destroy_context = nv50_fifo_destroy_context;
+ engine->fifo.load_context = nv50_fifo_load_context;
+ engine->fifo.unload_context = nv50_fifo_unload_context;
+ break;
+ default:
+ NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
+ return 1;
+ }
+
+ return 0;
+}
+
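+/* VGA arbiter callback: report which resources the device decodes.
+ * While VGA decoding is enabled we also claim the legacy I/O and
+ * memory ranges in addition to the normal ones.
+ */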
+static unsigned int
+nouveau_vga_set_decode(void *priv, bool state)
+{
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int
+nouveau_card_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
+ return 0;
+
+ vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+
+ /* Initialise internal driver API hooks */
+ ret = nouveau_init_engine_ptrs(dev);
+ if (ret)
+ return ret;
+ engine = &dev_priv->engine;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+
+ /* Parse BIOS tables / Run init tables if card not POSTed */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = nouveau_bios_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_early_init(dev);
+ if (ret)
+ return ret;
+
+ /* Initialise instance memory; this must happen before mem_init so we
+ * know exactly how much VRAM we're able to use for "normal"
+ * purposes.
+ */
+ ret = engine->instmem.init(dev);
+ if (ret)
+ return ret;
+
+ /* Setup the memory manager */
+ ret = nouveau_mem_init(dev);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_init(dev);
+ if (ret)
+ return ret;
+
+ /* PMC */
+ ret = engine->mc.init(dev);
+ if (ret)
+ return ret;
+
+ /* PTIMER */
+ ret = engine->timer.init(dev);
+ if (ret)
+ return ret;
+
+ /* PFB */
+ ret = engine->fb.init(dev);
+ if (ret)
+ return ret;
+
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret)
+ return ret;
+
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ return ret;
+
+ /* this calls irq_preinstall, registers the irq handler and
+ * calls irq_postinstall
+ */
+ ret = drm_irq_install(dev);
+ if (ret)
+ return ret;
+
+ ret = drm_vblank_init(dev, 0);
+ if (ret)
+ return ret;
+
+ /* what about PVIDEO/PCRTC/PRAMDAC etc? */
+
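+ /* Allocate a channel for in-kernel use; the (struct drm_file *)-2
+ * owner appears to mark it as belonging to the driver rather than
+ * to a client. */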
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel,
+ (struct drm_file *)-2,
+ NvDmaFB, NvDmaTT);
+ if (ret)
+ return ret;
+
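+ /* Create DMA objects covering all of VRAM and the GART aperture and
+ * expose them to the kernel channel as NvDmaVRAM and NvDmaGART. */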
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, nouveau_mem_fb_amount(dev),
+ NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ &gpuobj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
+ gpuobj, NULL);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
+ gpuobj, NULL);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (dev_priv->card_type >= NV_50) {
+ ret = nv50_display_create(dev);
+ if (ret)
+ return ret;
+ } else {
+ ret = nv04_display_create(dev);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = nouveau_backlight_init(dev);
+ if (ret)
+ NV_ERROR(dev, "Error %d registering backlight\n", ret);
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_helper_initial_config(dev);
+
+ return 0;
+}
+
+static void nouveau_card_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+ nouveau_backlight_exit(dev);
+
+ if (dev_priv->channel) {
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ }
+
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->mc.takedown(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
+ mutex_unlock(&dev->struct_mutex);
+ nouveau_sgdma_takedown(dev);
+
+ nouveau_gpuobj_takedown(dev);
+ nouveau_mem_close(dev);
+ engine->instmem.takedown(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_uninstall(dev);
+
+ nouveau_gpuobj_late_takedown(dev);
+ nouveau_bios_takedown(dev);
+
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+ }
+}
+
+/* a client is going away; release whatever was allocated for its
+ * file_priv */
+void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ nouveau_channel_cleanup(dev, file_priv);
+}
+
+/* first module load, setup the mmio/fb mapping */
+/* KMS: we need mmio at load time, not when the first drm client opens. */
+int nouveau_firstopen(struct drm_device *dev)
+{
+ return 0;
+}
+
+/* if we have an OF card, copy vbios to RAMIN */
+static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
+{
+#if defined(__powerpc__)
+ int size, i;
+ const uint32_t *bios;
+ struct device_node *dn = pci_device_to_OF_node(dev->pdev);
+ if (!dn) {
+ NV_INFO(dev, "Unable to get the OF node\n");
+ return;
+ }
+
+ bios = of_get_property(dn, "NVDA,BMP", &size);
+ if (bios) {
+ for (i = 0; i < size; i += 4)
+ nv_wi32(dev, i, bios[i/4]);
+ NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
+ } else {
+ NV_INFO(dev, "Unable to get the OF bios\n");
+ }
+#endif
+}
+
+int nouveau_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_nouveau_private *dev_priv;
+ uint32_t reg0;
+ resource_size_t mmio_start_offs;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (!dev_priv)
+ return -ENOMEM;
+ dev->dev_private = dev_priv;
+ dev_priv->dev = dev;
+
+ dev_priv->flags = flags & NOUVEAU_FLAGS;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+
+ NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
+ dev->pci_vendor, dev->pci_device, dev->pdev->class);
+
+ dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
+
+ if (dev_priv->acpi_dsm)
+ nouveau_hybrid_setup(dev);
+
+ dev_priv->wq = create_workqueue("nouveau");
+ if (!dev_priv->wq)
+ return -EINVAL;
+
+ /* resource 0 is mmio regs */
+ /* resource 1 is linear FB */
+ /* resource 2 is RAMIN (mmio regs + 0x1000000) */
+ /* resource 6 is bios */
+
+ /* map the mmio regs */
+ mmio_start_offs = pci_resource_start(dev->pdev, 0);
+ dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
+ if (!dev_priv->mmio) {
+ NV_ERROR(dev, "Unable to initialize the mmio mapping. "
+ "Please report your setup to " DRIVER_EMAIL "\n");
+ return -EINVAL;
+ }
+ NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
+ (unsigned long long)mmio_start_offs);
+
+#ifdef __BIG_ENDIAN
+ /* Put the card in BE mode if it's not */
+ if (nv_rd32(dev, NV03_PMC_BOOT_1))
+ nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+
+ DRM_MEMORYBARRIER();
+#endif
+
+ /* Time to determine the card architecture */
+ reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
+
+ /* We're dealing with >=NV10 */
+ if ((reg0 & 0x0f000000) > 0) {
+ /* Bits 27-20 contain the architecture in hex */
+ dev_priv->chipset = (reg0 & 0xff00000) >> 20;
+ /* NV04 or NV05 */
+ } else if ((reg0 & 0xff00fff0) == 0x20004000) {
+ dev_priv->chipset = 0x04;
+ } else
+ dev_priv->chipset = 0xff;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+ case 0x10:
+ case 0x20:
+ case 0x30:
+ dev_priv->card_type = dev_priv->chipset & 0xf0;
+ break;
+ case 0x40:
+ case 0x60:
+ dev_priv->card_type = NV_40;
+ break;
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ dev_priv->card_type = NV_50;
+ break;
+ default:
+ NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
+ return -EINVAL;
+ }
+
+ NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ dev_priv->card_type, reg0);
+
+ /* map larger RAMIN aperture on NV40 cards */
+ dev_priv->ramin = NULL;
+ if (dev_priv->card_type >= NV_40) {
+ int ramin_bar = 2;
+ if (pci_resource_len(dev->pdev, ramin_bar) == 0)
+ ramin_bar = 3;
+
+ dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
+ dev_priv->ramin = ioremap(
+ pci_resource_start(dev->pdev, ramin_bar),
+ dev_priv->ramin_size);
+ if (!dev_priv->ramin) {
+ NV_ERROR(dev, "Failed to init RAMIN mapping, "
+ "limited instance memory available\n");
+ }
+ }
+
+ /* On older cards (or if the above failed), create a map covering
+ * the BAR0 PRAMIN aperture */
+ if (!dev_priv->ramin) {
+ dev_priv->ramin_size = 1 * 1024 * 1024;
+ dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
+ dev_priv->ramin_size);
+ if (!dev_priv->ramin) {
+ NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
+ return -ENOMEM;
+ }
+ }
+
+ nouveau_OF_copy_vbios_to_ramin(dev);
+
+ /* Special flags */
+ if (dev->pci_device == 0x01a0)
+ dev_priv->flags |= NV_NFORCE;
+ else if (dev->pci_device == 0x01f0)
+ dev_priv->flags |= NV_NFORCE2;
+
+ /* For kernel modesetting, init card now and bring up fbcon */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int ret = nouveau_card_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nouveau_close(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* In the case of an error dev_priv may not be allocated yet */
+ if (dev_priv && dev_priv->card_type)
+ nouveau_card_takedown(dev);
+}
+
+/* KMS: we need mmio at load time, not when the first drm client opens. */
+void nouveau_lastclose(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ nouveau_close(dev);
+}
+
+int nouveau_unload(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (dev_priv->card_type >= NV_50)
+ nv50_display_destroy(dev);
+ else
+ nv04_display_destroy(dev);
+ nouveau_close(dev);
+ }
+
+ iounmap(dev_priv->mmio);
+ iounmap(dev_priv->ramin);
+
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int
+nouveau_ioctl_card_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return nouveau_card_init(dev);
+}
+
+int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_getparam *getparam = data;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ switch (getparam->param) {
+ case NOUVEAU_GETPARAM_CHIPSET_ID:
+ getparam->value = dev_priv->chipset;
+ break;
+ case NOUVEAU_GETPARAM_PCI_VENDOR:
+ getparam->value = dev->pci_vendor;
+ break;
+ case NOUVEAU_GETPARAM_PCI_DEVICE:
+ getparam->value = dev->pci_device;
+ break;
+ case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (drm_device_is_agp(dev))
+ getparam->value = NV_AGP;
+ else if (drm_device_is_pcie(dev))
+ getparam->value = NV_PCIE;
+ else
+ getparam->value = NV_PCI;
+ break;
+ case NOUVEAU_GETPARAM_FB_PHYSICAL:
+ getparam->value = dev_priv->fb_phys;
+ break;
+ case NOUVEAU_GETPARAM_AGP_PHYSICAL:
+ getparam->value = dev_priv->gart_info.aper_base;
+ break;
+ case NOUVEAU_GETPARAM_PCI_PHYSICAL:
+ if (dev->sg) {
+ getparam->value = (unsigned long)dev->sg->virtual;
+ } else {
+ NV_ERROR(dev, "Requested PCIGART address, "
+ "while no PCIGART was created\n");
+ return -EINVAL;
+ }
+ break;
+ case NOUVEAU_GETPARAM_FB_SIZE:
+ getparam->value = dev_priv->fb_available_size;
+ break;
+ case NOUVEAU_GETPARAM_AGP_SIZE:
+ getparam->value = dev_priv->gart_info.aper_size;
+ break;
+ case NOUVEAU_GETPARAM_VM_VRAM_BASE:
+ getparam->value = dev_priv->vm_vram_base;
+ break;
+ default:
+ NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+nouveau_ioctl_setparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_setparam *setparam = data;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ switch (setparam->param) {
+ default:
+ NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Wait until (value(reg) & mask) == val, up until timeout has hit */
+bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if ((nv_rd32(dev, reg) & mask) == val)
+ return true;
+ } while (ptimer->read(dev) - start < timeout);
+
+ return false;
+}
+
+/* Waits for PGRAPH to go completely idle */
+bool nouveau_wait_for_idle(struct drm_device *dev)
+{
+ if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
+ nv_rd32(dev, NV04_PGRAPH_STATUS));
+ return false;
+ }
+
+ return true;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 00000000000..187eb84e4da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+static struct vm_operations_struct nouveau_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int
+nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ int ret;
+
+ if (unlikely(bo == NULL))
+ return VM_FAULT_NOPAGE;
+
+ ret = ttm_vm_ops->fault(vma, vmf);
+ return ret;
+}
+
+int
+nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_nouveau_private *dev_priv =
+ file_priv->minor->dev->dev_private;
+ int ret;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
+ if (unlikely(ret != 0))
+ return ret;
+
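+ /* On the first mapping, cache TTM's vm_ops and build a copy with
+ * our own fault handler in front, so faults are intercepted here
+ * before being handed back to TTM. */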
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ nouveau_ttm_vm_ops = *ttm_vm_ops;
+ nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
+ }
+
+ vma->vm_ops = &nouveau_ttm_vm_ops;
+ return 0;
+}
+
+static int
+nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
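+/* Take references on the global TTM memory accounting and BO state,
+ * creating them on first use; dropped again in
+ * nouveau_ttm_global_release().
+ */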
+int
+nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
+{
+ struct ttm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &dev_priv->ttm.mem_global_ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &nouveau_ttm_mem_global_init;
+ global_ref->release = &nouveau_ttm_mem_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM memory accounting\n");
+ dev_priv->ttm.mem_global_ref.release = NULL;
+ return ret;
+ }
+
+ dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
+ global_ref = &dev_priv->ttm.bo_global_ref.ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM BO subsystem\n");
+ ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ dev_priv->ttm.mem_global_ref.release = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
+{
+ if (dev_priv->ttm.mem_global_ref.release == NULL)
+ return;
+
+ ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
+ ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ dev_priv->ttm.mem_global_ref.release = NULL;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
new file mode 100644
index 00000000000..b9136360605
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -0,0 +1,1002 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+ NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+ crtcstate->CRTC[index]);
+}
+
+static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+ regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
+ if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
+ regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
+ regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
+ }
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
+}
+
+static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+ nv_crtc->sharpness = level;
+ if (level < 0) /* blur is in hw range 0x3f -> 0x20 */
+ level += 0x40;
+ regp->ramdac_634 = level;
+ NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
+}
+
+#define PLLSEL_VPLL1_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
+#define PLLSEL_VPLL2_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
+#define PLLSEL_TV_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+/* NV4x 0x40.. pll notes:
+ * gpu pll: 0x4000 + 0x4004
+ * ?gpu? pll: 0x4008 + 0x400c
+ * vpll1: 0x4010 + 0x4014
+ * vpll2: 0x4018 + 0x401c
+ * mpll: 0x4020 + 0x4024
+ * mpll: 0x4038 + 0x403c
+ *
+ * the first register of each pair has some unknown details:
+ * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
+ * bits 20-23: (mpll) something to do with post divider?
+ * bits 28-31: related to single stage mode? (bit 8/12)
+ */
+
+static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
+ struct nouveau_pll_vals *pv = &regp->pllvals;
+ struct pll_lims pll_lim;
+
+ if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
+ return;
+
+ /* NM2 == 0 is used to determine single stage mode on two stage plls */
+ pv->NM2 = 0;
+
+ /* for newer nv4x the blob uses only the first stage of the vpll below a
+ * certain clock. for a certain nv4b this is 150MHz. since the max
+ * output frequency of the first stage for this card is 300MHz, it is
+ * assumed the threshold is given by vco1 maxfreq/2
+ */
+ /* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
+ * not 8, others unknown), the blob always uses both plls. no problem
+ * has yet been observed in allowing the use of a single stage pll on all
+ * nv43 however. the behaviour of single stage use is untested on nv40
+ */
+ if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
+ memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
+
+ if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
+ return;
+
+ state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
+
+ /* The blob uses this always, so let's do the same */
+ if (dev_priv->card_type == NV_40)
+ state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
+ /* again nv40 and some nv43 act more like nv3x as described above */
+ if (dev_priv->chipset < 0x41)
+ state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
+ NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
+ state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
+
+ if (pv->NM2)
+ NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+ pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
+ else
+ NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
+ pv->N1, pv->M1, pv->log2P);
+
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+}
+
+static void
+nv_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned char seq1 = 0, crtc17 = 0;
+ unsigned char crtc1A;
+
+ NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
+ nv_crtc->index);
+
+ if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
+ return;
+
+ nv_crtc->last_dpms = mode;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, nv_crtc->index);
+
+ /* nv4ref indicates these two RPC1 bits inhibit h/v sync */
+ crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
+ NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
+ switch (mode) {
+ case DRM_MODE_DPMS_STANDBY:
+ /* Screen: Off; HSync: Off, VSync: On -- Not Supported */
+ seq1 = 0x20;
+ crtc17 = 0x80;
+ crtc1A |= 0x80;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Screen: Off; HSync: On, VSync: Off -- Not Supported */
+ seq1 = 0x20;
+ crtc17 = 0x80;
+ crtc1A |= 0x40;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Screen: Off; HSync: Off, VSync: Off */
+ seq1 = 0x20;
+ crtc17 = 0x00;
+ crtc1A |= 0xC0;
+ break;
+ case DRM_MODE_DPMS_ON:
+ default:
+ /* Screen: On; HSync: On, VSync: On */
+ seq1 = 0x00;
+ crtc17 = 0x80;
+ break;
+ }
+
+ NVVgaSeqReset(dev, nv_crtc->index, true);
+ /* Each head has its own sequencer, so we can turn it off when we want */
+ seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
+ NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
+ crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
+ mdelay(10);
+ NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
+ NVVgaSeqReset(dev, nv_crtc->index, false);
+
+ NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+}
+
+static bool
+nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void
+nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct drm_framebuffer *fb = crtc->fb;
+
+ /* Calculate our timings */
+ int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
+ int horizStart = (mode->crtc_hsync_start >> 3) - 1;
+ int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
+ int horizTotal = (mode->crtc_htotal >> 3) - 5;
+ int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
+ int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
+ int vertDisplay = mode->crtc_vdisplay - 1;
+ int vertStart = mode->crtc_vsync_start - 1;
+ int vertEnd = mode->crtc_vsync_end - 1;
+ int vertTotal = mode->crtc_vtotal - 2;
+ int vertBlankStart = mode->crtc_vdisplay - 1;
+ int vertBlankEnd = mode->crtc_vtotal - 1;
+
+ struct drm_encoder *encoder;
+ bool fp_output = false;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (encoder->crtc == crtc &&
+ (nv_encoder->dcb->type == OUTPUT_LVDS ||
+ nv_encoder->dcb->type == OUTPUT_TMDS))
+ fp_output = true;
+ }
+
+ if (fp_output) {
+ vertStart = vertTotal - 3;
+ vertEnd = vertTotal - 2;
+ vertBlankStart = vertStart;
+ horizStart = horizTotal - 5;
+ horizEnd = horizTotal - 2;
+ horizBlankEnd = horizTotal + 4;
+#if 0
+ if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
+ /* This reportedly works around some video overlay bandwidth problems */
+ horizTotal += 2;
+#endif
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vertTotal |= 1;
+
+#if 0
+ ErrorF("horizDisplay: 0x%X \n", horizDisplay);
+ ErrorF("horizStart: 0x%X \n", horizStart);
+ ErrorF("horizEnd: 0x%X \n", horizEnd);
+ ErrorF("horizTotal: 0x%X \n", horizTotal);
+ ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
+ ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
+ ErrorF("vertDisplay: 0x%X \n", vertDisplay);
+ ErrorF("vertStart: 0x%X \n", vertStart);
+ ErrorF("vertEnd: 0x%X \n", vertEnd);
+ ErrorF("vertTotal: 0x%X \n", vertTotal);
+ ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
+ ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
+#endif
+
+ /*
+ * compute correct Hsync & Vsync polarity
+ */
+ if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
+ && (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
+
+ regp->MiscOutReg = 0x23;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ regp->MiscOutReg |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ regp->MiscOutReg |= 0x80;
+ } else {
+ int vdisplay = mode->vdisplay;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ vdisplay *= 2;
+ if (mode->vscan > 1)
+ vdisplay *= mode->vscan;
+ if (vdisplay < 400)
+ regp->MiscOutReg = 0xA3; /* +hsync -vsync */
+ else if (vdisplay < 480)
+ regp->MiscOutReg = 0x63; /* -hsync +vsync */
+ else if (vdisplay < 768)
+ regp->MiscOutReg = 0xE3; /* -hsync -vsync */
+ else
+ regp->MiscOutReg = 0x23; /* +hsync +vsync */
+ }
+
+ regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
+
+ /*
+ * Time Sequencer
+ */
+ regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
+ /* 0x20 disables the sequencer */
+ if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
+ regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
+ else
+ regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
+ regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
+ regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
+ regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
+
+ /*
+ * CRTC
+ */
+ regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
+ regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
+ regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
+ regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
+ XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
+ regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
+ regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
+ XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
+ regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
+ regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
+ XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
+ XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
+ (1 << 4) |
+ XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
+ XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
+ XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
+ XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
+ regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
+ 1 << 6 |
+ XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
+ regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
+ regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
+ regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
+ /* framebuffer can be larger than crtc scanout area. */
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+ regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
+ regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
+ regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
+ regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
+
+ /*
+ * Some extended CRTC registers (they are not saved with the rest of the vga regs).
+ */
+
+ /* framebuffer can be larger than crtc scanout area. */
+ regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
+ MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
+ regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
+ XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
+ XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
+ XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
+ XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
+ regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
+ XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
+ XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
+ XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
+ regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
+ XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
+ XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
+ XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ horizTotal = (horizTotal >> 1) & ~1;
+ regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
+ regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
+ } else
+ regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff; /* interlace off */
+
+ /*
+ * Graphics Display Controller
+ */
+ regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
+ regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
+ regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
+ regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
+
+ regp->Attribute[0] = 0x00; /* standard colormap translation */
+ regp->Attribute[1] = 0x01;
+ regp->Attribute[2] = 0x02;
+ regp->Attribute[3] = 0x03;
+ regp->Attribute[4] = 0x04;
+ regp->Attribute[5] = 0x05;
+ regp->Attribute[6] = 0x06;
+ regp->Attribute[7] = 0x07;
+ regp->Attribute[8] = 0x08;
+ regp->Attribute[9] = 0x09;
+ regp->Attribute[10] = 0x0A;
+ regp->Attribute[11] = 0x0B;
+ regp->Attribute[12] = 0x0C;
+ regp->Attribute[13] = 0x0D;
+ regp->Attribute[14] = 0x0E;
+ regp->Attribute[15] = 0x0F;
+ regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
+ /* Non-vga */
+ regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
+ regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
+ regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
+ regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static void
+nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+ struct drm_encoder *encoder;
+ bool lvds_output = false, tmds_output = false, tv_output = false,
+ off_chip_digital = false;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ bool digital = false;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ digital = lvds_output = true;
+ if (nv_encoder->dcb->type == OUTPUT_TV)
+ tv_output = true;
+ if (nv_encoder->dcb->type == OUTPUT_TMDS)
+ digital = tmds_output = true;
+ if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
+ off_chip_digital = true;
+ }
+
+ /* Registers not directly related to the (s)vga mode */
+
+ /* What is the meaning of this register? */
+ /* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
+ regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
+
+ regp->crtc_eng_ctrl = 0;
+ /* Except for rare conditions I2C is enabled on the primary crtc */
+ if (nv_crtc->index == 0)
+ regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
+#if 0
+ /* Set overlay to desired crtc. */
+ if (dev->overlayAdaptor) {
+ NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
+ if (pPriv->overlayCRTC == nv_crtc->index)
+ regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
+ }
+#endif
+
+ /* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
+ regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
+ NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
+ NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
+ if (dev_priv->chipset >= 0x11)
+ regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
+
+ /* Unblock some timings */
+ regp->CRTC[NV_CIO_CRE_53] = 0;
+ regp->CRTC[NV_CIO_CRE_54] = 0;
+
+ /* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
+ if (lvds_output)
+ regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
+ else if (tmds_output)
+ regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
+ else
+ regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
+
+ /* These values seem to vary */
+ /* This register seems to be used by the bios to make certain decisions on some G70 cards? */
+ regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
+
+ nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
+
+ /* probably a scratch reg, but kept for cargo-cult purposes:
+ * bit0: crtc0?, head A
+ * bit6: lvds, head A
+ * bit7: (only in X), head A
+ */
+ if (nv_crtc->index == 0)
+ regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
+
+ /* The blob seems to take the current value from crtc 0, add 4 to that
+ * and reuse the old value for crtc 1 */
+ regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
+ if (!nv_crtc->index)
+ regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
+
+ /* the blob sometimes sets |= 0x10 (which is the same as setting |=
+ * 1 << 30 on 0x60.830), for no apparent reason */
+ regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
+
+ regp->crtc_830 = mode->crtc_vdisplay - 3;
+ regp->crtc_834 = mode->crtc_vdisplay - 1;
+
+ if (dev_priv->card_type == NV_40)
+ /* This is what the blob does */
+ regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
+
+ if (dev_priv->card_type >= NV_30)
+ regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
+
+ regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+
+ /* Some misc regs */
+ if (dev_priv->card_type == NV_40) {
+ regp->CRTC[NV_CIO_CRE_85] = 0xFF;
+ regp->CRTC[NV_CIO_CRE_86] = 0x1;
+ }
+
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
+ /* Enable slaved mode (called MODE_TV in nv4ref.h) */
+ if (lvds_output || tmds_output || tv_output)
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
+
+ /* Generic PRAMDAC regs */
+
+ if (dev_priv->card_type >= NV_10)
+ /* Only bit that bios and blob set. */
+ regp->nv10_cursync = (1 << 25);
+
+ regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+ NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
+ NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
+ if (crtc->fb->depth == 16)
+ regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ if (dev_priv->chipset >= 0x11)
+ regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
+
+ regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
+ regp->tv_setup = 0;
+
+ nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
+
+ /* Some values the blob sets */
+ regp->ramdac_8c0 = 0x100;
+ regp->ramdac_a20 = 0x0;
+ regp->ramdac_a24 = 0xfffff;
+ regp->ramdac_a34 = 0x1;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static int
+nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
+ drm_mode_debug_printmodeline(adjusted_mode);
+
+ /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
+ nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
+
+ nv_crtc_mode_set_vga(crtc, adjusted_mode);
+ /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
+ if (dev_priv->card_type == NV_40)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+ nv_crtc_mode_set_regs(crtc, adjusted_mode);
+ nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
+ return 0;
+}
+
+static void nv_crtc_save(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
+ struct nv04_mode_state *saved = &dev_priv->saved_reg;
+ struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
+
+ if (nv_two_heads(crtc->dev))
+ NVSetOwner(crtc->dev, nv_crtc->index);
+
+ nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
+
+ /* init some state to saved value */
+ state->sel_clk = saved->sel_clk & ~(0x5 << 16);
+ crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
+ state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
+ crtc_state->gpio_ext = crtc_saved->gpio_ext;
+}
+
+static void nv_crtc_restore(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ int head = nv_crtc->index;
+ uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
+
+ if (nv_two_heads(crtc->dev))
+ NVSetOwner(crtc->dev, head);
+
+ nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
+ nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
+
+ nv_crtc->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, nv_crtc->index);
+
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ NVBlankScreen(dev, nv_crtc->index, true);
+
+ /* Some more preparation. */
+ NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
+ if (dev_priv->card_type == NV_40) {
+ uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
+ NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
+ }
+}
+
+static void nv_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
+ nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
+
+#ifdef __BIG_ENDIAN
+ /* turn on LFB swapping */
+ {
+ uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
+ tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
+ NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
+ }
+#endif
+
+ funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void nv_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ NV_DEBUG(crtc->dev, "\n");
+
+ if (!nv_crtc)
+ return;
+
+ drm_crtc_cleanup(crtc);
+
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ kfree(nv_crtc);
+}
+
+static void
+nv_crtc_gamma_load(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
+ int i;
+
+ rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
+ for (i = 0; i < 256; i++) {
+ rgbs[i].r = nv_crtc->lut.r[i] >> 8;
+ rgbs[i].g = nv_crtc->lut.g[i] >> 8;
+ rgbs[i].b = nv_crtc->lut.b[i] >> 8;
+ }
+
+ nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
+}
+
+static void
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
+ }
+
+ /* We need to know the depth before we upload, but it's possible to
+ * get called before a framebuffer is bound. If this is the case,
+ * mark the lut values as dirty by setting depth==0, and it'll be
+ * uploaded on the first mode_set_base()
+ */
+ if (!nv_crtc->base.fb) {
+ nv_crtc->lut.depth = 0;
+ return;
+ }
+
+ nv_crtc_gamma_load(crtc);
+}
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ int arb_burst, arb_lwm;
+ int ret;
+
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
+
+ nv_crtc->fb.offset = fb->nvbo->bo.offset;
+
+ if (nv_crtc->lut.depth != drm_fb->depth) {
+ nv_crtc->lut.depth = drm_fb->depth;
+ nv_crtc_gamma_load(crtc);
+ }
+
+ /* Update the framebuffer format. */
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
+ regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ if (crtc->fb->depth == 16)
+ regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
+ NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
+ regp->ramdac_gen_ctrl);
+
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+ regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+ XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+
+ /* Update the framebuffer location. */
+ regp->fb_start = nv_crtc->fb.offset & ~3;
+ regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+ NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
+
+ /* Update the arbitration parameters. */
+ nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
+ &arb_burst, &arb_lwm);
+
+ regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
+ regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
+
+ if (dev_priv->card_type >= NV_30) {
+ regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
+ }
+
+ return 0;
+}
+
+static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+ struct nouveau_bo *dst)
+{
+ int width = nv_cursor_width(dev);
+ uint32_t pixel;
+ int i, j;
+
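+ /* Convert the 64x64 ARGB8888 cursor image into the 1:5:5:5 format
+ * that pre-NV11 hardware expects. */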
+ for (i = 0; i < width; i++) {
+ for (j = 0; j < width; j++) {
+ pixel = nouveau_bo_rd32(src, i*64 + j);
+
+ nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
+ | (pixel & 0xf80000) >> 9
+ | (pixel & 0xf800) >> 6
+ | (pixel & 0xf8) >> 3);
+ }
+ }
+}
+
+static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+ struct nouveau_bo *dst)
+{
+ uint32_t pixel;
+ int alpha, i;
+
+ /* nv11+ supports premultiplied (PM) or non-premultiplied (NPM) alpha
+ * cursors (though NPM in combination with fp dithering may not work on
+ * nv11, from "nv" driver history)
+ * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
+ * blob uses, however we get given PM cursors so we use PM mode
+ */
+ for (i = 0; i < 64 * 64; i++) {
+ pixel = nouveau_bo_rd32(src, i);
+
+ /* hw gets unhappy if alpha <= rgb values. for a PM image "less
+ * than" shouldn't happen; fix "equal to" case by adding one to
+ * alpha channel (slightly inaccurate, but so is attempting to
+ * get back to NPM images, due to limits of integer precision)
+ */
+ alpha = pixel >> 24;
+ if (alpha > 0 && alpha < 255)
+ pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
+
+#ifdef __BIG_ENDIAN
+ {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
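+ /* NV11 apparently expects the cursor image byteswapped on
+ * big-endian machines, unlike later chips. */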
+ if (dev_priv->chipset == 0x11) {
+ pixel = ((pixel & 0x000000ff) << 24) |
+ ((pixel & 0x0000ff00) << 8) |
+ ((pixel & 0x00ff0000) >> 8) |
+ ((pixel & 0xff000000) >> 24);
+ }
+ }
+#endif
+
+ nouveau_bo_wr32(dst, i, pixel);
+ }
+}
+
+static int
+nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_bo *cursor = NULL;
+ struct drm_gem_object *gem;
+ int ret = 0;
+
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ if (!buffer_handle) {
+ nv_crtc->cursor.hide(nv_crtc, true);
+ return 0;
+ }
+
+ gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+ if (!gem)
+ return -EINVAL;
+ cursor = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(cursor);
+ if (ret)
+ goto out;
+
+ if (dev_priv->chipset >= 0x11)
+ nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+ else
+ nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+
+ nouveau_bo_unmap(cursor);
+ nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+ nv_crtc->cursor.show(nv_crtc, true);
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int
+nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_pos(nv_crtc, x, y);
+ return 0;
+}
+
+static const struct drm_crtc_funcs nv04_crtc_funcs = {
+ .save = nv_crtc_save,
+ .restore = nv_crtc_restore,
+ .cursor_set = nv04_crtc_cursor_set,
+ .cursor_move = nv04_crtc_cursor_move,
+ .gamma_set = nv_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
+ .dpms = nv_crtc_dpms,
+ .prepare = nv_crtc_prepare,
+ .commit = nv_crtc_commit,
+ .mode_fixup = nv_crtc_mode_fixup,
+ .mode_set = nv_crtc_mode_set,
+ .mode_set_base = nv04_crtc_mode_set_base,
+ .load_lut = nv_crtc_gamma_load,
+};
+
+int
+nv04_crtc_create(struct drm_device *dev, int crtc_num)
+{
+ struct nouveau_crtc *nv_crtc;
+ int ret, i;
+
+ nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+ if (!nv_crtc)
+ return -ENOMEM;
+
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = i << 8;
+ nv_crtc->lut.g[i] = i << 8;
+ nv_crtc->lut.b[i] = i << 8;
+ }
+ nv_crtc->lut.depth = 0;
+
+ nv_crtc->index = crtc_num;
+ nv_crtc->last_dpms = NV_DPMS_CLEARED;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
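+ /* Allocate, pin and map a 64x64 32bpp VRAM buffer to hold the
+ * hardware cursor image. */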
+ ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ }
+
+ nv04_cursor_init(nv_crtc);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
new file mode 100644
index 00000000000..89a91b9d8b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -0,0 +1,70 @@
+#include "drmP.h"
+#include "drm_mode.h"
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+
+static void
+nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+ nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
+}
+
+static void
+nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+ nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
+}
+
+static void
+nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+ NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
+ NV_PRAMDAC_CU_START_POS,
+ XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
+ XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
+}
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+ NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+ crtcstate->CRTC[index]);
+}
+
+static void
+nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct drm_crtc *crtc = &nv_crtc->base;
+
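+ /* the cursor offset is scattered across three CRTC regs: bits from
+ * 11 via ADDR1, from 17 via ADDR0, and 24 and up via ADDR2 */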
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
+ MASK(NV_CIO_CRE_HCUR_ASI) |
+ XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
+ XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
+ if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
+ MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
+
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+ if (dev_priv->card_type == NV_40)
+ nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
+}
+
+int
+nv04_cursor_init(struct nouveau_crtc *crtc)
+{
+ crtc->cursor.set_offset = nv04_cursor_set_offset;
+ crtc->cursor.set_pos = nv04_cursor_set_pos;
+ crtc->cursor.hide = nv04_cursor_hide;
+ crtc->cursor.show = nv04_cursor_show;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
new file mode 100644
index 00000000000..a5fa51714e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+int nv04_dac_output_offset(struct drm_encoder *encoder)
+{
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ int offset = 0;
+
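+ /* the or value encodes which on-chip DAC drives this output; the
+ * offsets below relocate RAMDAC register accesses to that DAC's block */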
+ if (dcb->or & (8 | OUTPUT_C))
+ offset += 0x68;
+ if (dcb->or & (8 | OUTPUT_B))
+ offset += 0x2000;
+
+ return offset;
+}
+
+/*
+ * arbitrary limit to number of sense oscillations tolerated in one sample
+ * period (observed to be at least 13 in "nvidia")
+ */
+#define MAX_HBLANK_OSC 20
+
+/*
+ * arbitrary limit to number of conflicting sample pairs to tolerate at a
+ * voltage step (observed to be at least 5 in "nvidia")
+ */
+#define MAX_SAMPLE_PAIRS 10
+
+static int sample_load_twice(struct drm_device *dev, bool sense[2])
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ bool sense_a, sense_b, sense_b_prime;
+ int j = 0;
+
+ /*
+ * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
+ * then wait for transition 0x4->0x5->0x4: enter hblank, leave
+ * hblank again.
+ * use a 10ms timeout (guards against crtc being inactive, in
+ * which case blank state would never change)
+ */
+ if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
+ return -EBUSY;
+ if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000001))
+ return -EBUSY;
+ if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
+ return -EBUSY;
+
+ udelay(100);
+ /* when level triggers, sense is _LO_ */
+ sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+
+ /* take another reading until it agrees with sense_a... */
+ do {
+ udelay(100);
+ sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+ if (sense_a != sense_b) {
+ sense_b_prime =
+ nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+ if (sense_b == sense_b_prime) {
+ /* ... unless two consecutive subsequent
+ * samples agree; sense_a is replaced */
+ sense_a = sense_b;
+ /* force mis-match so we loop */
+ sense_b = !sense_a;
+ }
+ }
+ } while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
+
+ if (j == MAX_HBLANK_OSC)
+ /* with so much oscillation, default to sense:LO */
+ sense[i] = false;
+ else
+ sense[i] = sense_a;
+ }
+
+ return 0;
+}
+
+static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ uint8_t saved_seq1, saved_pi, saved_rpc1;
+ uint8_t saved_palette0[3], saved_palette_mask;
+ uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
+ int i;
+ uint8_t blue;
+ bool sense = true;
+
+ /*
+ * for this detection to work, there needs to be a mode set up on the
+ * CRTC. this is presumed to be the case
+ */
+
+ if (nv_two_heads(dev))
+ /* only implemented for head A for now */
+ NVSetOwner(dev, 0);
+
+ saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
+
+ saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
+ saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+ msleep(10);
+
+ saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
+ saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
+ saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
+
+ nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+ for (i = 0; i < 3; i++)
+ saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
+ saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);
+
+ saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
+ (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+ NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
+ NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
+
+ blue = 8; /* start of test range */
+
+ do {
+ bool sense_pair[2];
+
+ nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+ /* testing blue won't find monochrome monitors. I don't care */
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);
+
+ i = 0;
+ /* take sample pairs until both samples in the pair agree */
+ do {
+ if (sample_load_twice(dev, sense_pair))
+ goto out;
+ } while ((sense_pair[0] != sense_pair[1]) &&
+ ++i < MAX_SAMPLE_PAIRS);
+
+ if (i == MAX_SAMPLE_PAIRS)
+ /* too much oscillation defaults to LO */
+ sense = false;
+ else
+ sense = sense_pair[0];
+
+ /*
+ * if sense goes LO before blue ramps to 0x18, monitor is not connected.
+ * ergo, if blue gets to 0x18, monitor must be connected
+ */
+ } while (++blue < 0x18 && sense);
+
+out:
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
+ nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ for (i = 0; i < 3; i++)
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+
+ if (blue == 0x18) {
+ NV_TRACE(dev, "Load detected on head A\n");
+ return connector_status_connected;
+ }
+
+ return connector_status_disconnected;
+}
+
+enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
+ saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
+ int head, present = 0;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
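+ /* 10 bits per colour channel, packed red low to blue high */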
+ if (dcb->type == OUTPUT_TV) {
+ testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
+
+ if (dev_priv->vbios->tvdactestval)
+ testval = dev_priv->vbios->tvdactestval;
+ } else {
+ testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
+
+ if (dev_priv->vbios->dactestval)
+ testval = dev_priv->vbios->dactestval;
+ }
+
+ saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
+ saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+ saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);
+
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+ if (regoffset == 0x68) {
+ saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+ }
+
+ saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+
+ msleep(4);
+
+ saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ head = (saved_routput & 0x100) >> 8;
+#if 0
+ /* if there's a spare crtc, using it will minimise flicker for the case
+ * where the in-use crtc is in use by an off-chip tmds encoder */
+ if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
+ head ^= 1;
+#endif
+ /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
+ routput = (saved_routput & 0xfffffece) | head << 8;
+
+ if (dev_priv->card_type >= NV_40) {
+ if (dcb->type == OUTPUT_TV)
+ routput |= 0x1a << 16;
+ else
+ routput &= ~(0x1a << 16);
+ }
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
+ msleep(1);
+
+ temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
+ NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
+ temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+ temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+ msleep(5);
+
+ temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+
+ if (dcb->type == OUTPUT_TV)
+ present = (nv17_tv_detect(encoder, connector, temp)
+ == connector_status_connected);
+ else
+ present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+
+ temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+ temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
+
+ /* bios does something more complex for restoring, but I think this is good enough */
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
+ if (regoffset == 0x68)
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+
+ if (present) {
+ NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+ return connector_status_connected;
+ }
+
+ return connector_status_disconnected;
+}
+
+
+static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void nv04_dac_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_disable(dev, head);
+
+ /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+ * at LCD__INDEX which we don't alter
+ */
+ if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
+ crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+}
+
+
+static void nv04_dac_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+
+ NV_TRACE(dev, "%s called for encoder %d\n", __func__,
+ nv_encoder->dcb->index);
+
+ if (nv_gf4_disp_arch(dev)) {
+ struct drm_encoder *rebind;
+ uint32_t dac_offset = nv04_dac_output_offset(encoder);
+ uint32_t otherdac;
+
+ /* bits 16-19 are set on some G70 cards,
+ * but don't seem to have much effect */
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+ head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+ /* force any other vga encoders to bind to the other crtc */
+ list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
+ if (rebind == encoder
+ || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
+ continue;
+
+ dac_offset = nv04_dac_output_offset(rebind);
+ otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+ (otherdac & ~0x0100) | (head ^ 1) << 8);
+ }
+ }
+
+ /* This could use refinement for flatpanels, but it should work this way */
+ if (dev_priv->chipset < 0x44)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+ else
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+}
+
+static void nv04_dac_commit(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+
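+ /* dac_users refcounts the dcb entries driving this OR, so DACCLK is
+ * only gated off once the last user is gone */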
+ if (nv_gf4_disp_arch(dev)) {
+ uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
+ int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
+ uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
+
+ if (enable) {
+ *dac_users |= 1 << dcb->index;
+ NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+
+ } else {
+ *dac_users &= ~(1 << dcb->index);
+ if (!*dac_users)
+ NVWriteRAMDAC(dev, 0, dacclk_off,
+ dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
+ }
+ }
+}
+
+static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->last_dpms == mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv04_dac_save(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+
+ if (nv_gf4_disp_arch(dev))
+ nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder));
+}
+
+static void nv04_dac_restore(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+
+ if (nv_gf4_disp_arch(dev))
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
+ nv_encoder->restore.output);
+
+ nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dac_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ NV_DEBUG(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
+ .dpms = nv04_dac_dpms,
+ .save = nv04_dac_save,
+ .restore = nv04_dac_restore,
+ .mode_fixup = nv04_dac_mode_fixup,
+ .prepare = nv04_dac_prepare,
+ .commit = nv04_dac_commit,
+ .mode_set = nv04_dac_mode_set,
+ .detect = nv04_dac_detect
+};
+
+static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
+ .dpms = nv04_dac_dpms,
+ .save = nv04_dac_save,
+ .restore = nv04_dac_restore,
+ .mode_fixup = nv04_dac_mode_fixup,
+ .prepare = nv04_dac_prepare,
+ .commit = nv04_dac_commit,
+ .mode_set = nv04_dac_mode_set,
+ .detect = nv17_dac_detect
+};
+
+static const struct drm_encoder_funcs nv04_dac_funcs = {
+ .destroy = nv04_dac_destroy,
+};
+
+int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ const struct drm_encoder_helper_funcs *helper;
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ if (nv_gf4_disp_arch(dev))
+ helper = &nv17_dac_helper_funcs;
+ else
+ helper = &nv04_dac_helper_funcs;
+
+ drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, helper);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
new file mode 100644
index 00000000000..e5b33339d59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
+#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
+
+static inline bool is_fpc_off(uint32_t fpc)
+{
+ return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
+ FP_TG_CONTROL_OFF);
+}
+
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
+{
+ /* special case of nv_read_tmds to find crtc associated with an output.
+ * this does not give a correct answer for off-chip dvi, but there's no
+ * use for such an answer anyway
+ */
+ int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
+ NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
+ return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
+}
+
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+ int head, bool dl)
+{
+ /* The BIOS scripts don't do this for us, sadly.
+ * Luckily we do know the values ;-)
+ */
+
+ int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+ uint8_t tmds04 = 0x80;
+
+ if (head != ramdac)
+ tmds04 = 0x88;
+
+ if (dcbent->type == OUTPUT_LVDS)
+ tmds04 |= 0x01;
+
+ nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
+
+ if (dl) /* dual link */
+ nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
+}
+
+void nv04_dfp_disable(struct drm_device *dev, int head)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+
+ if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
+ FP_TG_CONTROL_ON) {
+ /* digital remnants must be cleaned before new crtc
+ * values are programmed. the delay gives the vga hardware
+ * time to realise it's in control again
+ */
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+ FP_TG_CONTROL_OFF);
+ msleep(50);
+ }
+ /* don't inadvertently turn it on when state written later */
+ crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+}
+
+void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct nouveau_crtc *nv_crtc;
+ uint32_t *fpc;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ nv_crtc = nouveau_crtc(encoder->crtc);
+ fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+ if (is_fpc_off(*fpc)) {
+ /* using saved value is ok, as (is_digital && dpms_on &&
+ * fp_control==OFF) is (at present) *only* true when
+ * fpc's most recent change was by below "off" code
+ */
+ *fpc = nv_crtc->dpms_saved_fp_control;
+ }
+
+ nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
+ NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+ } else {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ nv_crtc = nouveau_crtc(crtc);
+ fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+ nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
+ if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
+ nv_crtc->dpms_saved_fp_control = *fpc;
+ /* cut the FP output */
+ *fpc &= ~FP_TG_CONTROL_ON;
+ *fpc |= FP_TG_CONTROL_OFF;
+ NVWriteRAMDAC(dev, nv_crtc->index,
+ NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+ }
+ }
+ }
+}
+
+static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+ /* For internal panels and gpu scaling on DVI we need the native mode */
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ if (!nv_connector->native_mode)
+ return false;
+ nv_encoder->mode = *nv_connector->native_mode;
+ adjusted_mode->clock = nv_connector->native_mode->clock;
+ } else {
+ nv_encoder->mode = *adjusted_mode;
+ }
+
+ return true;
+}
+
+static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
+ struct nouveau_encoder *nv_encoder, int head)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
+
+ if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
+ return;
+
+ /* SEL_CLK is only used on the primary ramdac.
+ * It toggles spread spectrum PLL output and sets the bindings of PLLs
+ * to heads on digital outputs
+ */
+ if (head)
+ state->sel_clk |= bits1618;
+ else
+ state->sel_clk &= ~bits1618;
+
+ /* nv30:
+ * bit 0 NVClk spread spectrum on/off
+ * bit 2 MemClk spread spectrum on/off
+ * bit 4 PixClk1 spread spectrum on/off toggle
+ * bit 6 PixClk2 spread spectrum on/off toggle
+ *
+ * nv40 (observations from bios behaviour and mmio traces):
+ * bits 4&6 as for nv30
+ * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6;
+ * maybe a different spread mode
+ * bits 8&10 seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
+ * The logic behind turning spread spectrum on/off in the first place,
+ * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
+ * entry has the necessary info)
+ */
+ if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
+ int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;
+
+ state->sel_clk &= ~0xf0;
+ state->sel_clk |= (head ? 0x40 : 0x10) << shift;
+ }
+}
+
+static void nv04_dfp_prepare(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+ uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
+ uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
+
+ /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+ * at LCD__INDEX which we don't alter
+ */
+ if (!(*cr_lcd & 0x44)) {
+ *cr_lcd = 0x3;
+
+ if (nv_two_heads(dev)) {
+ if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+ *cr_lcd |= head ? 0x0 : 0x8;
+ else {
+ *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ *cr_lcd |= 0x30;
+ if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+ /* avoid being connected to both crtcs */
+ *cr_lcd_oth &= ~0x30;
+ NVWriteVgaCrtc(dev, head ^ 1,
+ NV_CIO_CRE_LCD__INDEX,
+ *cr_lcd_oth);
+ }
+ }
+ }
+ }
+}
+
+
+static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+ struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_display_mode *output_mode = &nv_encoder->mode;
+ uint32_t mode_ratio, panel_ratio;
+
+ NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+ drm_mode_debug_printmodeline(output_mode);
+
+ /* Initialize the FP registers in this CRTC. */
+ regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+ regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+ if (!nv_gf4_disp_arch(dev) ||
+ (output_mode->hsync_start - output_mode->hdisplay) >=
+ dev_priv->vbios->digital_min_front_porch)
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
+ else
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+ regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
+ regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+ regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
+ regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
+
+ regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+ regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+ regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
+ regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
+ regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+ regp->fp_vert_regs[FP_VALID_START] = 0;
+ regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
+
+ /* bit 26: a bit seen on some g7x, with no as yet discernible purpose */
+ regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
+ /* Deal with vsync/hsync polarity */
+ /* LVDS screens do set this, but modes with +ve syncs are very rare */
+ if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+ if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+ /* panel scaling first, as native would get set otherwise */
+ if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+ nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
+ else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
+ adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
+ else /* gpu needs to scale */
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
+ if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+ if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
+ output_mode->clock > 165000)
+ regp->fp_control |= (2 << 24);
+ if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+ bool duallink, dummy;
+
+ nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
+ clock, &duallink, &dummy);
+ if (duallink)
+ regp->fp_control |= (8 << 28);
+ } else
+ if (output_mode->clock > 165000)
+ regp->fp_control |= (8 << 28);
+
+ regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+ NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+ NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+ NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+ NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+ /* We want automatic scaling */
+ regp->fp_debug_1 = 0;
+ /* This can override HTOTAL and VTOTAL */
+ regp->fp_debug_2 = 0;
+
+ /* Use 20.12 fixed point format to avoid floats */
+ mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
+ panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
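+ /* e.g. a 1024x768 mode gives (1 << 12) * 1024 / 768 = 5461, i.e. ~1.333 in 20.12 */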
+ /* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
+ * get treated the same as SCALE_FULLSCREEN */
+ if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
+ mode_ratio != panel_ratio) {
+ uint32_t diff, scale;
+ bool divide_by_2 = nv_gf4_disp_arch(dev);
+
+ if (mode_ratio < panel_ratio) {
+ /* vertical needs to expand to glass size (automatic)
+ * horizontal needs to be scaled at vertical scale factor
+ * to maintain aspect */
+
+ scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
+ regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+ XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+ /* restrict area of screen used, horizontally */
+ diff = output_mode->hdisplay -
+ output_mode->vdisplay * mode_ratio / (1 << 12);
+ regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
+ regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
+ }
+
+ if (mode_ratio > panel_ratio) {
+ /* horizontal needs to expand to glass size (automatic)
+ * vertical needs to be scaled at horizontal scale factor
+ * to maintain aspect */
+
+ scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
+ regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+ XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
+
+ /* restrict area of screen used, vertically */
+ diff = output_mode->vdisplay -
+ (1 << 12) * output_mode->hdisplay / mode_ratio;
+ regp->fp_vert_regs[FP_VALID_START] += diff / 2;
+ regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
+ }
+ }
+
+ /* Output property. */
+ if (nv_connector->use_dithering) {
+ if (dev_priv->chipset == 0x11)
+ regp->dither = savep->dither | 0x00010000;
+ else {
+ int i;
+ regp->dither = savep->dither | 0x00000001;
+ for (i = 0; i < 3; i++) {
+ regp->dither_regs[i] = 0xe4e4e4e4;
+ regp->dither_regs[i + 3] = 0x44444444;
+ }
+ }
+ } else {
+ if (dev_priv->chipset != 0x11) {
+ /* reset them */
+ int i;
+ for (i = 0; i < 3; i++) {
+ regp->dither_regs[i] = savep->dither_regs[i];
+ regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
+ }
+ }
+ regp->dither = savep->dither;
+ }
+
+ regp->fp_margin_color = 0;
+}
+
+static void nv04_dfp_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct dcb_entry *dcbe = nv_encoder->dcb;
+ int head = nouveau_crtc(encoder->crtc)->index;
+
+ NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);
+
+ if (dcbe->type == OUTPUT_TMDS)
+ run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
+ else if (dcbe->type == OUTPUT_LVDS)
+ call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
+
+ /* update fp_control state for any changes made by scripts,
+ * so correct value is written at DPMS on */
+ dev_priv->mode_reg.crtc_reg[head].fp_control =
+ NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+
+ /* This could use refinement for flatpanels, but it should work this way */
+ if (dev_priv->chipset < 0x44)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+ else
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static inline bool is_powersaving_dpms(int mode)
+{
+ return (mode != DRM_MODE_DPMS_ON);
+}
+
+static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
+
+ if (nv_encoder->last_dpms == mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
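+ /* nothing to do when moving between two power-saving states */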
+ if (was_powersaving && is_powersaving_dpms(mode))
+ return;
+
+ if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
+ struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+ /* when removing an output, crtc may not be set, but PANEL_OFF
+ * must still be run
+ */
+ int head = crtc ? nouveau_crtc(crtc)->index :
+ nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ if (!nv_connector->native_mode) {
+ NV_ERROR(dev, "Not turning on LVDS without native mode\n");
+ return;
+ }
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_ON, nv_connector->native_mode->clock);
+ } else
+ /* pxclk of 0 is fine for PANEL_OFF, and for a
+ * disconnected LVDS encoder there is no native_mode
+ */
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_OFF, 0);
+ }
+
+ nv04_dfp_update_fp_control(encoder, mode);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
+ else {
+ dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+ dev_priv->mode_reg.sel_clk &= ~0xf0;
+ }
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+}
+
+static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->last_dpms == mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dfp_update_fp_control(encoder, mode);
+}
+
+static void nv04_dfp_save(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+
+ if (nv_two_heads(dev))
+ nv_encoder->restore.head =
+ nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+}
+
+static void nv04_dfp_restore(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nv_encoder->restore.head;
+
+ if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+ struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
+ if (native_mode)
+ call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
+ native_mode->clock);
+ else
+ NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+
+ } else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
+ int clock = nouveau_hw_pllvals_to_clk
+ (&dev_priv->saved_reg.crtc_reg[head].pllvals);
+
+ run_tmds_table(dev, nv_encoder->dcb, head, clock);
+ }
+
+ nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dfp_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ NV_DEBUG(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
+ .dpms = nv04_lvds_dpms,
+ .save = nv04_dfp_save,
+ .restore = nv04_dfp_restore,
+ .mode_fixup = nv04_dfp_mode_fixup,
+ .prepare = nv04_dfp_prepare,
+ .commit = nv04_dfp_commit,
+ .mode_set = nv04_dfp_mode_set,
+ .detect = NULL,
+};
+
+static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
+ .dpms = nv04_tmds_dpms,
+ .save = nv04_dfp_save,
+ .restore = nv04_dfp_restore,
+ .mode_fixup = nv04_dfp_mode_fixup,
+ .prepare = nv04_dfp_prepare,
+ .commit = nv04_dfp_commit,
+ .mode_set = nv04_dfp_mode_set,
+ .detect = NULL,
+};
+
+static const struct drm_encoder_funcs nv04_dfp_funcs = {
+ .destroy = nv04_dfp_destroy,
+};
+
+int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ const struct drm_encoder_helper_funcs *helper;
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
+ int type;
+
+ switch (entry->type) {
+ case OUTPUT_TMDS:
+ type = DRM_MODE_ENCODER_TMDS;
+ helper = &nv04_tmds_helper_funcs;
+ break;
+ case OUTPUT_LVDS:
+ type = DRM_MODE_ENCODER_LVDS;
+ helper = &nv04_lvds_helper_funcs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
+ drm_encoder_helper_add(encoder, helper);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
new file mode 100644
index 00000000000..b47c757ff48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_fb.h"
+#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+
+#define MULTIPLE_ENCODERS(e) (e & (e - 1))
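+/* non-zero when more than one encoder-type bit is set (clearing the
+ * lowest set bit leaves a remainder) */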
+
+static void
+nv04_display_store_initial_head_owner(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset != 0x11) {
+ dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
+ goto ownerknown;
+ }
+
+ /* reading CR44 is broken on nv11, so we attempt to infer it */
+ if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
+ dev_priv->crtc_owner = 0x4;
+ else {
+ uint8_t slaved_on_A, slaved_on_B;
+ bool tvA = false;
+ bool tvB = false;
+
+ NVLockVgaCrtcs(dev, false);
+
+ slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
+ 0x80;
+ if (slaved_on_B)
+ tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
+ MASK(NV_CIO_CRE_LCD_LCD_SELECT));
+
+ slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
+ 0x80;
+ if (slaved_on_A)
+ tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
+ MASK(NV_CIO_CRE_LCD_LCD_SELECT));
+
+ NVLockVgaCrtcs(dev, true);
+
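+ /* prefer a head slaved to a digital panel, then any slaved head,
+ * then head A */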
+ if (slaved_on_A && !tvA)
+ dev_priv->crtc_owner = 0x0;
+ else if (slaved_on_B && !tvB)
+ dev_priv->crtc_owner = 0x3;
+ else if (slaved_on_A)
+ dev_priv->crtc_owner = 0x0;
+ else if (slaved_on_B)
+ dev_priv->crtc_owner = 0x3;
+ else
+ dev_priv->crtc_owner = 0x0;
+ }
+
+ownerknown:
+ NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
+
+ /* we need to ensure the heads are not tied henceforth, or reading any
+ * 8 bit reg on head B will fail;
+ * setting a single arbitrary head solves that */
+ NVSetOwner(dev, 0);
+}
+
+int
+nv04_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ uint16_t connector[16] = { 0 };
+ int i, ret;
+
+ NV_DEBUG(dev, "\n");
+
+ if (nv_two_heads(dev))
+ nv04_display_store_initial_head_owner(dev);
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ switch (dev_priv->card_type) {
+ case NV_04:
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ break;
+ default:
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ break;
+ }
+
+ dev->mode_config.fb_base = dev_priv->fb_phys;
+
+ nv04_crtc_create(dev, 0);
+ if (nv_two_heads(dev))
+ nv04_crtc_create(dev, 1);
+
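+ /* first pass: create an encoder per dcb entry, recording which
+ * encoder types share each connector index */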
+ for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *dcbent = &dcb->entry[i];
+
+ switch (dcbent->type) {
+ case OUTPUT_ANALOG:
+ ret = nv04_dac_create(dev, dcbent);
+ break;
+ case OUTPUT_LVDS:
+ case OUTPUT_TMDS:
+ ret = nv04_dfp_create(dev, dcbent);
+ break;
+ case OUTPUT_TV:
+ if (dcbent->location == DCB_LOC_ON_CHIP)
+ ret = nv17_tv_create(dev, dcbent);
+ else
+ ret = nv04_tv_create(dev, dcbent);
+ break;
+ default:
+ NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
+ continue;
+ }
+
+ if (ret)
+ continue;
+
+ connector[dcbent->connector] |= (1 << dcbent->type);
+ }
+
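+ /* second pass: create one drm connector per dcb connector index,
+ * typed by the encoders attached to it */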
+ for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *dcbent = &dcb->entry[i];
+ uint16_t encoders;
+ int type;
+
+ encoders = connector[dcbent->connector];
+ if (!(encoders & (1 << dcbent->type)))
+ continue;
+ connector[dcbent->connector] = 0;
+
+ switch (dcbent->type) {
+ case OUTPUT_ANALOG:
+ if (!MULTIPLE_ENCODERS(encoders))
+ type = DRM_MODE_CONNECTOR_VGA;
+ else
+ type = DRM_MODE_CONNECTOR_DVII;
+ break;
+ case OUTPUT_TMDS:
+ if (!MULTIPLE_ENCODERS(encoders))
+ type = DRM_MODE_CONNECTOR_DVID;
+ else
+ type = DRM_MODE_CONNECTOR_DVII;
+ break;
+ case OUTPUT_LVDS:
+ type = DRM_MODE_CONNECTOR_LVDS;
+#if 0
+ /* don't create i2c adapter when lvds ddc not allowed */
+ if (dcbent->lvdsconf.use_straps_for_mode ||
+ dev_priv->vbios->fp_no_ddc)
+ i2c_index = 0xf;
+#endif
+ break;
+ case OUTPUT_TV:
+ type = DRM_MODE_CONNECTOR_TV;
+ break;
+ default:
+ type = DRM_MODE_CONNECTOR_Unknown;
+ continue;
+ }
+
+ nouveau_connector_create(dev, dcbent->connector, type);
+ }
+
+ /* Save previous state */
+ NVLockVgaCrtcs(dev, false);
+
+ nouveau_hw_save_vga_fonts(dev, 1);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ crtc->funcs->save(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+ func->save(encoder);
+ }
+
+ return 0;
+}
+
+void
+nv04_display_destroy(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ NV_DEBUG(dev, "\n");
+
+ /* Turn every CRTC off. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_mode_set modeset = {
+ .crtc = crtc,
+ };
+
+ crtc->funcs->set_config(&modeset);
+ }
+
+ /* Restore state */
+ NVLockVgaCrtcs(dev, false);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+ func->restore(encoder);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ crtc->funcs->restore(crtc);
+
+ nouveau_hw_save_vga_fonts(dev, 0);
+
+ drm_mode_config_cleanup(dev);
+}
+
+void
+nv04_display_restore(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ NVLockVgaCrtcs(dev, false);
+
+ /* meh.. modeset apparently doesn't set up all the regs and depends
+ * on pre-existing state; for now, load the state of the card *before*
+ * nouveau was loaded, and then do a modeset.
+ *
+ * the best thing to do is probably to make the save/restore routines
+ * not save/restore "pre-load" state, but more general, so we can save
+ * on suspend too.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+ func->restore(encoder);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ crtc->funcs->restore(crtc);
+
+ if (nv_two_heads(dev)) {
+ NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
+ dev_priv->crtc_owner);
+ NVSetOwner(dev, dev_priv->crtc_owner);
+ }
+
+ NVLockVgaCrtcs(dev, true);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 00000000000..638cf601c42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,21 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_fb_init(struct drm_device *dev)
+{
+ /* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
+ * nvidia reading PFB_CFG_0, then writing back its original value
+ * (which was 0x701114 in this case).
+ */
+
+ nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
+ return 0;
+}
+
+void
+nv04_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 00000000000..09a31071ee5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2009 Ben Skeggs
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+static void
+nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
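+ /* a failed ring-space reservation means the channel is wedged; fall
+ * back to the software path permanently */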
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_copyarea(info, region);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
+ OUT_RING(chan, (region->sy << 16) | region->sx);
+ OUT_RING(chan, (region->dy << 16) | region->dx);
+ OUT_RING(chan, (region->height << 16) | region->width);
+ FIRE_RING(chan);
+}
+
+static void
+nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_fillrect(info, rect);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
+ BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
+ OUT_RING(chan, color);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
+ OUT_RING(chan, (rect->dx << 16) | rect->dy);
+ OUT_RING(chan, (rect->width << 16) | rect->height);
+ FIRE_RING(chan);
+}
+
+static void
+nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t fg;
+ uint32_t bg;
+ uint32_t dsize;
+ uint32_t width;
+ uint32_t *data = (uint32_t *)image->data;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (image->depth != 1) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
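+ /* pad scanlines to 32 pixels; dsize counts the 32-bit words of 1bpp
+ * source data */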
+ width = (image->width + 31) & ~31;
+ dsize = (width * image->height) >> 5;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
+ bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
+ } else {
+ fg = image->fg_color;
+ bg = image->bg_color;
+ }
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
+ OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ OUT_RING(chan, ((image->dy + image->height) << 16) |
+ ((image->dx + image->width) & 0xffff));
+ OUT_RING(chan, bg);
+ OUT_RING(chan, fg);
+ OUT_RING(chan, (image->height << 16) | image->width);
+ OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+
+ while (dsize) {
+ int iter_len = dsize > 128 ? 128 : dsize;
+
+ if (RING_SPACE(chan, iter_len + 1)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
+ OUT_RINGp(chan, data, iter_len);
+ data += iter_len;
+ dsize -= iter_len;
+ }
+
+ FIRE_RING(chan);
+}
+
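+/* create a graphics object of the given hw class and bind it into the
+ * fbcon channel under the supplied handle */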
+static int
+nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nv04_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int surface_fmt, pattern_fmt, rect_fmt;
+ int ret;
+
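+ /* pick 2D surface/pattern/rect colour formats matching the fb depth
+ * (the numeric values are hardware format codes) */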
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ surface_fmt = 1;
+ pattern_fmt = 3;
+ rect_fmt = 3;
+ break;
+ case 16:
+ surface_fmt = 4;
+ pattern_fmt = 1;
+ rect_fmt = 1;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ surface_fmt = 6;
+ pattern_fmt = 3;
+ rect_fmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ 0x0062 : 0x0042, NvCtxSurf2D);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ 0x009f : 0x005f, NvImageBlit);
+ if (ret)
+ return ret;
+
+ if (RING_SPACE(chan, 49)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ return 0;
+ }
+
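+	/* Each block below follows the same pattern: method 0x0000 on a
+	 * subchannel binds an object handle to that subchannel, and the
+	 * methods that follow configure the bound object.  Minimal sketch
+	 * of the pattern:
+	 *
+	 *	BEGIN_RING(chan, subchannel, 0x0000, 1);
+	 *	OUT_RING(chan, handle);		e.g. NvCtxSurf2D
+	 */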
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvCtxSurf2D);
+ BEGIN_RING(chan, 1, 0x0184, 2);
+ OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, NvDmaFB);
+ BEGIN_RING(chan, 1, 0x0300, 4);
+ OUT_RING(chan, surface_fmt);
+ OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
+ OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+ OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvRop);
+ BEGIN_RING(chan, 1, 0x0300, 1);
+ OUT_RING(chan, 0x55);
+
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvImagePatt);
+ BEGIN_RING(chan, 1, 0x0300, 8);
+ OUT_RING(chan, pattern_fmt);
+#ifdef __BIG_ENDIAN
+ OUT_RING(chan, 2);
+#else
+ OUT_RING(chan, 1);
+#endif
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, ~0);
+ OUT_RING(chan, ~0);
+ OUT_RING(chan, ~0);
+ OUT_RING(chan, ~0);
+
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvClipRect);
+ BEGIN_RING(chan, 1, 0x0300, 2);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
+
+ BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
+ OUT_RING(chan, NvImageBlit);
+ BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
+ OUT_RING(chan, NvCtxSurf2D);
+ BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
+ OUT_RING(chan, 3);
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
+ OUT_RING(chan, NvGdiRect);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
+ OUT_RING(chan, NvCtxSurf2D);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
+ OUT_RING(chan, NvImagePatt);
+ OUT_RING(chan, NvRop);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
+ OUT_RING(chan, rect_fmt);
+ BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ OUT_RING(chan, 3);
+
+ FIRE_RING(chan);
+
+ info->fbops->fb_fillrect = nv04_fbcon_fillrect;
+ info->fbops->fb_copyarea = nv04_fbcon_copyarea;
+ info->fbops->fb_imageblit = nv04_fbcon_imageblit;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 00000000000..0c3cd53c731
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC__SIZE 32
+#define NV04_RAMFC_DMA_PUT 0x00
+#define NV04_RAMFC_DMA_GET 0x04
+#define NV04_RAMFC_DMA_INSTANCE 0x08
+#define NV04_RAMFC_DMA_STATE 0x0C
+#define NV04_RAMFC_DMA_FETCH 0x10
+#define NV04_RAMFC_ENGINE 0x14
+#define NV04_RAMFC_PULL1_ENGINE 0x18
+
+#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4)
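+
+/* For illustration, RAMFC_WR(DMA_PUT, x) expands to
+ * nv_wo32(dev, chan->ramfc->gpuobj, NV04_RAMFC_DMA_PUT/4, (x)):
+ * the byte offsets defined above become 32-bit word indices into the
+ * channel's RAMFC entry.
+ */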
+
+void
+nv04_fifo_disable(struct drm_device *dev)
+{
+ uint32_t tmp;
+
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
+}
+
+void
+nv04_fifo_enable(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
+bool
+nv04_fifo_reassign(struct drm_device *dev, bool enable)
+{
+ uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
+ return (reassign == 1);
+}
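+
+/* Illustrative use of the return value (a sketch, not code from this
+ * patch): disable reassignment around a critical section and restore
+ * the previous state afterwards.
+ *
+ *	bool enabled = nv04_fifo_reassign(dev, false);
+ *	... poke PFIFO state ...
+ *	nv04_fifo_reassign(dev, enabled);
+ */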
+
+int
+nv04_fifo_channel_id(struct drm_device *dev)
+{
+ return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv04_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
+ NV04_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+	/* Set up the initial state */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ RAMFC_WR(DMA_PUT, chan->pushbuf_base);
+ RAMFC_WR(DMA_GET, chan->pushbuf_base);
+ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0));
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /* enable the fifo dma operation */
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ return 0;
+}
+
+void
+nv04_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv04_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV04_RAMFC(chid), tmp;
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+ tmp = nv_ri32(dev, fc + 8);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
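+
+/* The fc + 0/4/8/... offsets above follow the NV04_RAMFC_* layout
+ * defined at the top of this file; e.g. fc + 8 is
+ * NV04_RAMFC_DMA_INSTANCE, whose low 16 bits hold the pushbuf
+ * instance and whose high 16 bits hold the DMA dword count.
+ */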
+
+int
+nv04_fifo_load_context(struct nouveau_channel *chan)
+{
+ uint32_t tmp;
+
+ nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+ nv04_fifo_do_load_context(chan->dev, chan->id);
+ nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+ tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+ nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv04_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan = NULL;
+ uint32_t tmp;
+ int chid;
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+
+ chan = dev_priv->fifos[chid];
+ if (!chan) {
+ NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
+ return -EINVAL;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
+ tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
+ RAMFC_WR(DMA_INSTANCE, tmp);
+ RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+ RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
+ RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+ RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv04_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ return 0;
+}
+
+static void
+nv04_fifo_init_reset(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x003224, 0x000f0078);
+ nv_wr32(dev, 0x002044, 0x0101ffff);
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002500, 0x00000000);
+ nv_wr32(dev, 0x003000, 0x00000000);
+ nv_wr32(dev, 0x003050, 0x00000000);
+ nv_wr32(dev, 0x003200, 0x00000000);
+ nv_wr32(dev, 0x003250, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000000);
+
+ nv_wr32(dev, 0x003250, 0x00000000);
+ nv_wr32(dev, 0x003270, 0x00000000);
+ nv_wr32(dev, 0x003210, 0x00000000);
+}
+
+static void
+nv04_fifo_init_ramxx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+}
+
+static void
+nv04_fifo_init_intr(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv04_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ int i;
+
+ nv04_fifo_init_reset(dev);
+ nv04_fifo_init_ramxx(dev);
+
+ nv04_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+ nv04_fifo_init_intr(dev);
+ pfifo->enable(dev);
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (dev_priv->fifos[i]) {
+ uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+ }
+ }
+
+ return 0;
+}
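+
+/* NV04_PFIFO_MODE is a per-channel bitmask selecting DMA mode.  The
+ * loop above could equivalently be written as one read-modify-write
+ * (a sketch, assuming the same locals):
+ *
+ *	uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ *	for (i = 0; i < pfifo->channels; i++)
+ *		if (dev_priv->fifos[i])
+ *			mode |= 1 << i;
+ *	nv_wr32(dev, NV04_PFIFO_MODE, mode);
+ */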
+
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 00000000000..396ee92118f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+static uint32_t nv04_graph_ctx_regs[] = {
+ NV04_PGRAPH_CTX_SWITCH1,
+ NV04_PGRAPH_CTX_SWITCH2,
+ NV04_PGRAPH_CTX_SWITCH3,
+ NV04_PGRAPH_CTX_SWITCH4,
+ NV04_PGRAPH_CTX_CACHE1,
+ NV04_PGRAPH_CTX_CACHE2,
+ NV04_PGRAPH_CTX_CACHE3,
+ NV04_PGRAPH_CTX_CACHE4,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ 0x00400174,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV04_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV04_PGRAPH_SURFACE,
+ NV04_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV04_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM+0x00,
+ NV04_PGRAPH_PATT_COLORRAM+0x01,
+ NV04_PGRAPH_PATT_COLORRAM+0x02,
+ NV04_PGRAPH_PATT_COLORRAM+0x03,
+ NV04_PGRAPH_PATT_COLORRAM+0x04,
+ NV04_PGRAPH_PATT_COLORRAM+0x05,
+ NV04_PGRAPH_PATT_COLORRAM+0x06,
+ NV04_PGRAPH_PATT_COLORRAM+0x07,
+ NV04_PGRAPH_PATT_COLORRAM+0x08,
+ NV04_PGRAPH_PATT_COLORRAM+0x09,
+ NV04_PGRAPH_PATT_COLORRAM+0x0A,
+ NV04_PGRAPH_PATT_COLORRAM+0x0B,
+ NV04_PGRAPH_PATT_COLORRAM+0x0C,
+ NV04_PGRAPH_PATT_COLORRAM+0x0D,
+ NV04_PGRAPH_PATT_COLORRAM+0x0E,
+ NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x10,
+ NV04_PGRAPH_PATT_COLORRAM+0x11,
+ NV04_PGRAPH_PATT_COLORRAM+0x12,
+ NV04_PGRAPH_PATT_COLORRAM+0x13,
+ NV04_PGRAPH_PATT_COLORRAM+0x14,
+ NV04_PGRAPH_PATT_COLORRAM+0x15,
+ NV04_PGRAPH_PATT_COLORRAM+0x16,
+ NV04_PGRAPH_PATT_COLORRAM+0x17,
+ NV04_PGRAPH_PATT_COLORRAM+0x18,
+ NV04_PGRAPH_PATT_COLORRAM+0x19,
+ NV04_PGRAPH_PATT_COLORRAM+0x1A,
+ NV04_PGRAPH_PATT_COLORRAM+0x1B,
+ NV04_PGRAPH_PATT_COLORRAM+0x1C,
+ NV04_PGRAPH_PATT_COLORRAM+0x1D,
+ NV04_PGRAPH_PATT_COLORRAM+0x1E,
+ NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x20,
+ NV04_PGRAPH_PATT_COLORRAM+0x21,
+ NV04_PGRAPH_PATT_COLORRAM+0x22,
+ NV04_PGRAPH_PATT_COLORRAM+0x23,
+ NV04_PGRAPH_PATT_COLORRAM+0x24,
+ NV04_PGRAPH_PATT_COLORRAM+0x25,
+ NV04_PGRAPH_PATT_COLORRAM+0x26,
+ NV04_PGRAPH_PATT_COLORRAM+0x27,
+ NV04_PGRAPH_PATT_COLORRAM+0x28,
+ NV04_PGRAPH_PATT_COLORRAM+0x29,
+ NV04_PGRAPH_PATT_COLORRAM+0x2A,
+ NV04_PGRAPH_PATT_COLORRAM+0x2B,
+ NV04_PGRAPH_PATT_COLORRAM+0x2C,
+ NV04_PGRAPH_PATT_COLORRAM+0x2D,
+ NV04_PGRAPH_PATT_COLORRAM+0x2E,
+ NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x30,
+ NV04_PGRAPH_PATT_COLORRAM+0x31,
+ NV04_PGRAPH_PATT_COLORRAM+0x32,
+ NV04_PGRAPH_PATT_COLORRAM+0x33,
+ NV04_PGRAPH_PATT_COLORRAM+0x34,
+ NV04_PGRAPH_PATT_COLORRAM+0x35,
+ NV04_PGRAPH_PATT_COLORRAM+0x36,
+ NV04_PGRAPH_PATT_COLORRAM+0x37,
+ NV04_PGRAPH_PATT_COLORRAM+0x38,
+ NV04_PGRAPH_PATT_COLORRAM+0x39,
+ NV04_PGRAPH_PATT_COLORRAM+0x3A,
+ NV04_PGRAPH_PATT_COLORRAM+0x3B,
+ NV04_PGRAPH_PATT_COLORRAM+0x3C,
+ NV04_PGRAPH_PATT_COLORRAM+0x3D,
+ NV04_PGRAPH_PATT_COLORRAM+0x3E,
+ NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATTERN,
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ 0x00400600,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ NV04_PGRAPH_CONTROL0,
+ NV04_PGRAPH_CONTROL1,
+ NV04_PGRAPH_CONTROL2,
+ NV04_PGRAPH_BLEND,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400400,
+ 0x00400480,
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400534,
+ 0x00400538,
+ 0x00400514,
+ 0x00400518,
+ 0x0040051c,
+ 0x00400520,
+ 0x00400524,
+ 0x00400528,
+ 0x0040052c,
+ 0x00400530,
+ 0x00400d00,
+ 0x00400d40,
+ 0x00400d80,
+ 0x00400d04,
+ 0x00400d44,
+ 0x00400d84,
+ 0x00400d08,
+ 0x00400d48,
+ 0x00400d88,
+ 0x00400d0c,
+ 0x00400d4c,
+ 0x00400d8c,
+ 0x00400d10,
+ 0x00400d50,
+ 0x00400d90,
+ 0x00400d14,
+ 0x00400d54,
+ 0x00400d94,
+ 0x00400d18,
+ 0x00400d58,
+ 0x00400d98,
+ 0x00400d1c,
+ 0x00400d5c,
+ 0x00400d9c,
+ 0x00400d20,
+ 0x00400d60,
+ 0x00400da0,
+ 0x00400d24,
+ 0x00400d64,
+ 0x00400da4,
+ 0x00400d28,
+ 0x00400d68,
+ 0x00400da8,
+ 0x00400d2c,
+ 0x00400d6c,
+ 0x00400dac,
+ 0x00400d30,
+ 0x00400d70,
+ 0x00400db0,
+ 0x00400d34,
+ 0x00400d74,
+ 0x00400db4,
+ 0x00400d38,
+ 0x00400d78,
+ 0x00400db8,
+ 0x00400d3c,
+ 0x00400d7c,
+ 0x00400dbc,
+ 0x00400590,
+ 0x00400594,
+ 0x00400598,
+ 0x0040059c,
+ 0x004005a8,
+ 0x004005ac,
+ 0x004005b0,
+ 0x004005b4,
+ 0x004005c0,
+ 0x004005c4,
+ 0x004005c8,
+ 0x004005cc,
+ 0x004005d0,
+ 0x004005d4,
+ 0x004005d8,
+ 0x004005dc,
+ 0x004005e0,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV04_PGRAPH_DVD_COLORFMT,
+ NV04_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ 0x00400500,
+ 0x00400504,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2
+};
+
+struct graph_state {
+ int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
+
+struct nouveau_channel *
+nv04_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid = dev_priv->engine.fifo.channels;
+
+ if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
+ chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
+
+ if (chid >= dev_priv->engine.fifo.channels)
+ return NULL;
+
+ return dev_priv->fifos[chid];
+}
+
+void
+nv04_graph_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_channel *chan = NULL;
+ int chid;
+
+ pgraph->fifo_access(dev, false);
+ nouveau_wait_for_idle(dev);
+
+ /* If previous context is valid, we need to save it */
+ pgraph->unload_context(dev);
+
+ /* Load context for next channel */
+ chid = dev_priv->engine.fifo.channel_id(dev);
+ chan = dev_priv->fifos[chid];
+ if (chan)
+ nv04_graph_load_context(chan);
+
+ pgraph->fifo_access(dev, true);
+}
+
+int nv04_graph_create_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx;
+	NV_DEBUG(chan->dev, "nv04_graph_create_context %d\n", chan->id);
+
+ chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
+ GFP_KERNEL);
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+ /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
+ pgraph_ctx->nv04[0] = 0x0001ffff;
+	/* XXX: is this really needed? */
+#if 0
+ dev_priv->fifos[channel].pgraph_ctx[1] =
+ nv_rd32(dev, NV_PGRAPH_DEBUG_4);
+ dev_priv->fifos[channel].pgraph_ctx[2] =
+ nv_rd32(dev, 0x004006b0);
+#endif
+ return 0;
+}
+
+void nv04_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+
+ kfree(pgraph_ctx);
+ chan->pgraph_ctx = NULL;
+}
+
+int nv04_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ uint32_t tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
+
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+ tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
+ nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+ return 0;
+}
+
+int
+nv04_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_channel *chan = NULL;
+ struct graph_state *ctx;
+ uint32_t tmp;
+ int i;
+
+ chan = pgraph->channel(dev);
+ if (!chan)
+ return 0;
+ ctx = chan->pgraph_ctx;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
+
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
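+
+/* Context switching on NV04 is done entirely in software: a channel's
+ * graphics "context" is just the register snapshot held in
+ * struct graph_state, saved by the loop above and written back by
+ * nv04_graph_load_context().
+ */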
+
+int nv04_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ /* Enable PGRAPH interrupts */
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
+ nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	 * nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);
+	 */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	/* 0x1231c000 blob, 0x001 haiku */
+	/* V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/* 0x72111100 blob, 0x01 haiku */
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/* haiku uses the same value */
+
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/* haiku and blob use 10d4 */
+
+	nv_wr32(dev, NV04_PGRAPH_STATE, 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= dev_priv->engine.fifo.channels << 24;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+
+ /* These don't belong here, they're part of a per-channel context */
+ nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
+
+ return 0;
+}
+
+void nv04_graph_takedown(struct drm_device *dev)
+{
+}
+
+void
+nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+ if (enabled)
+ nv_wr32(dev, NV04_PGRAPH_FIFO,
+ nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
+ else
+ nv_wr32(dev, NV04_PGRAPH_FIFO,
+ nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
+}
+
+static int
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ chan->fence.last_sequence_irq = data;
+ nouveau_fence_handler(chan->dev, chan->id);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+ int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+ uint32_t tmp;
+
+ tmp = nv_ri32(dev, instance);
+ tmp &= ~0x00038000;
+ tmp |= ((data & 7) << 15);
+
+ nv_wi32(dev, instance, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
+ return 0;
+}
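+
+/* Bits 15-17 of the object's first instance word select the
+ * operation.  As a worked example, data == 3 yields:
+ *
+ *	tmp &= ~0x00038000;	clear bits 15-17
+ *	tmp |= (3 & 7) << 15;	-> 0x00018000
+ */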
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+ { 0x0150, nv04_graph_mthd_set_ref },
+ {}
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+ { 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
+ { 0x0039, false, nv04_graph_mthds_m2mf },
+ { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
+ { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
+ { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
+ { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+ { 0x0030, false, NULL }, /* null */
+ { 0x0042, false, NULL }, /* surf2d */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x0052, false, NULL }, /* swzsurf */
+ { 0x0053, false, NULL }, /* surf3d */
+ { 0x0054, false, NULL }, /* tex_tri */
+ { 0x0055, false, NULL }, /* multitex_tri */
+ {}
+};
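+
+/* A software-method dispatch over the table above might look like the
+ * sketch below; the field names are assumed for illustration and may
+ * not match struct nouveau_pgraph_object_class exactly.
+ *
+ *	for (grc = nv04_graph_grclass; grc->id; grc++)
+ *		if (grc->id == class)
+ *			return grc->methods;
+ */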
+
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 00000000000..a20c206625a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,208 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+/* returns the size of fifo context */
+static int
+nouveau_fifo_ctx_size(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ return 128;
+ else
+ if (dev_priv->chipset >= 0x17)
+ return 64;
+
+ return 32;
+}
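+
+/* I.e. NV40 and newer use 128-byte contexts, NV17 through NV3x use
+ * 64 bytes, and earlier chipsets use 32 bytes per channel.
+ */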
+
+static void
+nv04_instmem_determine_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Figure out how much instance memory we need */
+ if (dev_priv->card_type >= NV_40) {
+ /* We'll want more instance memory than this on some NV4x cards.
+ * There's a 16MB aperture to play with that maps onto the end
+ * of vram. For now, only reserve a small piece until we know
+ * more about what each chipset requires.
+ */
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x40:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+ break;
+ default:
+ dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+ break;
+ }
+ } else {
+		/* XXX: what *are* the limits on <NV40 cards? */
+ dev_priv->ramin_rsvd_vram = (512 * 1024);
+ }
+ NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
+
+ /* Clear all of it, except the BIOS image that's in the first 64KiB */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
+ nv_wi32(dev, i, 0x00000000);
+ dev_priv->engine.instmem.finish_access(dev);
+}
+
+static void
+nv04_instmem_configure_fixed_tables(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ /* FIFO hash table (RAMHT)
+ * use 4k hash table at RAMIN+0x10000
+ * TODO: extend the hash table
+ */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */
+ dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */
+ NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
+ dev_priv->ramht_size);
+
+	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
+ dev_priv->ramro_offset = 0x11200;
+ dev_priv->ramro_size = 512;
+ NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
+ dev_priv->ramro_size);
+
+ /* FIFO context table (RAMFC)
+ * NV40 : Not sure exactly how to position RAMFC on some cards,
+ * 0x30002 seems to position it at RAMIN+0x20000 on these
+	 *	   cards. RAMFC is 4KiB (32 fifos, 128-byte entries).
+ * Others: Position RAMFC at RAMIN+0x11400
+ */
+ dev_priv->ramfc_size = engine->fifo.channels *
+ nouveau_fifo_ctx_size(dev);
+ switch (dev_priv->card_type) {
+ case NV_40:
+ dev_priv->ramfc_offset = 0x20000;
+ break;
+ case NV_30:
+ case NV_20:
+ case NV_10:
+ case NV_04:
+ default:
+ dev_priv->ramfc_offset = 0x11400;
+ break;
+ }
+ NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
+ dev_priv->ramfc_size);
+}
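+
+/* Worked example of the RAMHT sizing above: ramht_bits = 9 gives
+ * 1 << 9 = 512 entries, and at 8 bytes (two 32-bit words) per entry
+ * the hash table occupies 4KiB starting at RAMIN+0x10000.
+ */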
+
+int nv04_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ int ret = 0;
+
+ nv04_instmem_determine_amount(dev);
+ nv04_instmem_configure_fixed_tables(dev);
+
+	/* Create a heap to manage RAMIN allocations; we don't allocate
+	 * the space that was reserved for RAMHT/FC/RO.
+	 */
+ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+
+ /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
+ * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
+	 * ("new style" control) the upper 16 bits of 0x2220 point at
+	 * another mysterious table that clobbers important things.
+	 *
+	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
+	 * smashed to pieces on us, so reserve 0x30000-0x40000 as well.
+	 */
+ if (dev_priv->card_type >= NV_40) {
+ if (offset < 0x40000)
+ offset = 0x40000;
+ }
+
+ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ offset, dev_priv->ramin_rsvd_vram - offset);
+ if (ret) {
+ dev_priv->ramin_heap = NULL;
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ }
+
+ return ret;
+}
+
+void
+nv04_instmem_takedown(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ return 0;
+}
+
+void
+nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->engine.instmem.unbind(dev, gpuobj);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (!gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
+void
+nv04_instmem_prepare_access(struct drm_device *dev, bool write)
+{
+}
+
+void
+nv04_instmem_finish_access(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_suspend(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nv04_instmem_resume(struct drm_device *dev)
+{
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 00000000000..617ed1e0526
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,20 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_mc_init(struct drm_device *dev)
+{
+ /* Power up everything, resetting each individual unit will
+ * be done later if needed.
+ */
+
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+ return 0;
+}
+
+void
+nv04_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 00000000000..1d09ddd5739
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,51 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_timer_init(struct drm_device *dev)
+{
+ nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
+ nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
+
+	/* Just use the pre-existing values when possible for now; these regs
+	 * are not written by the nv driver (its author missed a /4 on the
+	 * address), and writing 8 and 3 to the correct regs breaks the
+	 * timings of the LVDS hardware sequencing microcode.
+	 * A correct solution (involving calculations with the GPU PLL) can
+	 * be implemented when kernel modesetting lands.
+	 */
+ if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+ !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
+ }
+
+ return 0;
+}
+
+uint64_t
+nv04_timer_read(struct drm_device *dev)
+{
+ uint32_t low;
+ /* From kmmio dumps on nv28 this looks like how the blob does this.
+ * It reads the high dword twice, before and after.
+ * The only explanation seems to be that the 64-bit timer counter
+ * advances between high and low dword reads and may corrupt the
+ * result. Not confirmed.
+ */
+ uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ uint32_t high1;
+ do {
+ high1 = high2;
+ low = nv_rd32(dev, NV04_PTIMER_TIME_0);
+ high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ } while (high1 != high2);
+ return (((uint64_t)high2) << 32) | (uint64_t)low;
+}
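+
+/* This is the usual technique for reading a 64-bit counter through two
+ * 32-bit registers: if the high dword changed while the low dword was
+ * being read, the pair is re-read.  Without the loop, a rollover such
+ * as 0x0ffffffff -> 0x100000000 between the two reads could pair
+ * high = 0 with low = 0 and return a value that is off by 2^32.
+ */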
+
+void
+nv04_timer_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
new file mode 100644
index 00000000000..9c63099e9c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "drm_crtc_helper.h"
+
+#include "i2c/ch7006.h"
+
+static struct {
+ struct i2c_board_info board_info;
+ struct drm_encoder_funcs funcs;
+ struct drm_encoder_helper_funcs hfuncs;
+ void *params;
+} nv04_tv_encoder_info[] = {
+ {
+ .board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
+ .params = &(struct ch7006_encoder_params) {
+ CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+ 0, 0, 0,
+ CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+ CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+ },
+ },
+};
+
+static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
+{
+ struct i2c_msg msg = {
+ .addr = addr,
+ .len = 0,
+ };
+
+ return i2c_transfer(adapter, &msg, 1) == 1;
+}
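+
+/* A zero-length message is a bare address probe: i2c_transfer() returns
+ * the number of messages transferred (here 1) only when a device ACKs
+ * the address, similar to i2cdetect's quick-probe mode.
+ */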
+
+int nv04_tv_identify(struct drm_device *dev, int i2c_index)
+{
+ struct nouveau_i2c_chan *i2c;
+ bool was_locked;
+	int i;
+
+ NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
+
+ i2c = nouveau_i2c_find(dev, i2c_index);
+ if (!i2c)
+ return -ENODEV;
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+
+ for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
+ if (probe_i2c_addr(&i2c->adapter,
+ nv04_tv_encoder_info[i].board_info.addr)) {
+			break;
+ }
+ }
+
+ if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
+ NV_TRACE(dev, "Detected TV encoder: %s\n",
+ nv04_tv_encoder_info[i].board_info.type);
+	} else {
+ NV_TRACE(dev, "No TV encoders found.\n");
+ i = -ENODEV;
+ }
+
+ NVLockVgaCrtcs(dev, was_locked);
+ return i;
+}
+
+#define PLLSEL_TV_CRTC1_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
+#define PLLSEL_TV_CRTC2_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ uint8_t crtc1A;
+
+ NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ int head = nouveau_crtc(encoder->crtc)->index;
+ crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
+
+ state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
+ PLLSEL_TV_CRTC1_MASK;
+
+ /* Inhibit hsync */
+ crtc1A |= 0x80;
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+ }
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+
+ to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
+}
+
+static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
+
+ state->tv_setup = 0;
+
+ if (bind) {
+ state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+ state->CRTC[NV_CIO_CRE_49] |= 0x10;
+ } else {
+ state->CRTC[NV_CIO_CRE_49] &= ~0x10;
+ }
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
+ state->CRTC[NV_CIO_CRE_LCD__INDEX]);
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
+ state->CRTC[NV_CIO_CRE_49]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
+ state->tv_setup);
+}
+
+static void nv04_tv_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_disable(dev, head);
+
+ if (nv_two_heads(dev))
+ nv04_tv_bind(dev, head ^ 1, false);
+
+ nv04_tv_bind(dev, head, true);
+}
+
+static void nv04_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+ regp->tv_htotal = adjusted_mode->htotal;
+ regp->tv_vtotal = adjusted_mode->vtotal;
+
+	/* These delay the TV signals with respect to the VGA port; they
+	 * might be useful if we ever allow a CRTC to drive multiple
+	 * outputs.
+	 */
+ regp->tv_hskew = 1;
+ regp->tv_hsync_delay = 1;
+ regp->tv_hsync_delay2 = 64;
+ regp->tv_vskew = 1;
+ regp->tv_vsync_delay = 1;
+
+ to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
+}
+
+static void nv04_tv_commit(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
+ '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv04_tv_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
+
+ drm_encoder_cleanup(encoder);
+
+ kfree(nv_encoder);
+}
+
+int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct i2c_adapter *adap;
+ struct drm_encoder_funcs *funcs = NULL;
+ struct drm_encoder_helper_funcs *hfuncs = NULL;
+ struct drm_encoder_slave_funcs *sfuncs = NULL;
+ int i2c_index = entry->i2c_index;
+ int type, ret;
+ bool was_locked;
+
+ /* Ensure that we can talk to this encoder */
+ type = nv04_tv_identify(dev, i2c_index);
+ if (type < 0)
+ return type;
+
+ /* Allocate the necessary memory */
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+
+ /* Initialize the common members */
+ encoder = to_drm_encoder(nv_encoder);
+
+ funcs = &nv04_tv_encoder_info[type].funcs;
+ hfuncs = &nv04_tv_encoder_info[type].hfuncs;
+
+ drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, hfuncs);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ /* Run the slave-specific initialization */
+ adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+
+ ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
+ &nv04_tv_encoder_info[type].board_info);
+
+ NVLockVgaCrtcs(dev, was_locked);
+
+ if (ret < 0)
+ goto fail;
+
+ /* Fill the function pointers */
+ sfuncs = to_encoder_slave(encoder)->slave_funcs;
+
+ *funcs = (struct drm_encoder_funcs) {
+ .destroy = nv04_tv_destroy,
+ };
+
+ *hfuncs = (struct drm_encoder_helper_funcs) {
+ .dpms = nv04_tv_dpms,
+ .save = sfuncs->save,
+ .restore = sfuncs->restore,
+ .mode_fixup = sfuncs->mode_fixup,
+ .prepare = nv04_tv_prepare,
+ .commit = nv04_tv_commit,
+ .mode_set = nv04_tv_mode_set,
+ .detect = sfuncs->detect,
+ };
+
+ /* Set the slave encoder configuration */
+ sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
+
+ return 0;
+
+fail:
+ drm_encoder_cleanup(encoder);
+
+ kfree(nv_encoder);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 00000000000..79e2d104d70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,24 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv10_fb_init(struct drm_device *dev)
+{
+ uint32_t fb_bar_size;
+ int i;
+
+ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, NV10_PFB_TILE(i), 0);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
+ }
+
+ return 0;
+}
+
+void
+nv10_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 00000000000..7aeabf262bc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
+
+int
+nv10_fifo_channel_id(struct drm_device *dev)
+{
+ return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv10_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_device *dev = chan->dev;
+ uint32_t fc = NV10_RAMFC(chan->id);
+ int ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
+ NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+	/* Fill the entries that are seen filled in dumps of the nvidia
+	 * driver just after a channel is put into DMA mode.
+	 */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wi32(dev, fc + 0, chan->pushbuf_base);
+ nv_wi32(dev, fc + 4, chan->pushbuf_base);
+ nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /* enable the fifo dma operation */
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ return 0;
+}
+
+void
+nv10_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv10_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV10_RAMFC(chid), tmp;
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
+
+ tmp = nv_ri32(dev, fc + 12);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
+
+ if (dev_priv->chipset < 0x17)
+ goto out;
+
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
+ tmp = nv_ri32(dev, fc + 36);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
+
+out:
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv10_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t tmp;
+
+ nv10_fifo_do_load_context(dev, chan->id);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv10_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ uint32_t fc, tmp;
+ int chid;
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+ fc = NV10_RAMFC(chid);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+
+ nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
+ tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
+ nv_wi32(dev, fc + 12, tmp);
+ nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+ nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
+ nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+ nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+
+ if (dev_priv->chipset < 0x17)
+ goto out;
+
+ nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+ tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+ nv_wi32(dev, fc + 36, tmp);
+ nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+ nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
+ nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+
+out:
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv10_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ return 0;
+}
+
+static void
+nv10_fifo_init_reset(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x003224, 0x000f0078);
+ nv_wr32(dev, 0x002044, 0x0101ffff);
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002500, 0x00000000);
+ nv_wr32(dev, 0x003000, 0x00000000);
+ nv_wr32(dev, 0x003050, 0x00000000);
+
+ nv_wr32(dev, 0x003258, 0x00000000);
+ nv_wr32(dev, 0x003210, 0x00000000);
+ nv_wr32(dev, 0x003270, 0x00000000);
+}
+
+static void
+nv10_fifo_init_ramxx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+
+ if (dev_priv->chipset < 0x17) {
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+ } else {
+ nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
+					       (1 << 16) /* 64-byte entries */);
+		/* XXX nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
+ }
+}
+
+static void
+nv10_fifo_init_intr(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv10_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ int i;
+
+ nv10_fifo_init_reset(dev);
+ nv10_fifo_init_ramxx(dev);
+
+ nv10_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+ nv10_fifo_init_intr(dev);
+ pfifo->enable(dev);
+ pfifo->reassign(dev, true);
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (dev_priv->fifos[i]) {
+ uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 00000000000..6bf6804bb0e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,892 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_FIFO_NUMBER 32
+
+struct pipe_state {
+ uint32_t pipe_0x0000[0x040/4];
+ uint32_t pipe_0x0040[0x010/4];
+ uint32_t pipe_0x0200[0x0c0/4];
+ uint32_t pipe_0x4400[0x080/4];
+ uint32_t pipe_0x6400[0x3b0/4];
+ uint32_t pipe_0x6800[0x2f0/4];
+ uint32_t pipe_0x6c00[0x030/4];
+ uint32_t pipe_0x7000[0x130/4];
+ uint32_t pipe_0x7400[0x0c0/4];
+ uint32_t pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs[] = {
+ NV10_PGRAPH_CTX_SWITCH1,
+ NV10_PGRAPH_CTX_SWITCH2,
+ NV10_PGRAPH_CTX_SWITCH3,
+ NV10_PGRAPH_CTX_SWITCH4,
+ NV10_PGRAPH_CTX_SWITCH5,
+ NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */
+ NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */
+ NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */
+ NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */
+ NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */
+ 0x00400164,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400168,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040016c,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400170,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400174,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400178,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040017c,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ NV10_PGRAPH_CTX_USER,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV10_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV10_PGRAPH_SURFACE,
+ NV10_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV10_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+ 0x00400904,
+ 0x00400908,
+ 0x0040090c,
+ 0x00400910,
+ 0x00400914,
+ 0x00400918,
+ 0x0040091c,
+ 0x00400920,
+ 0x00400924,
+ 0x00400928,
+ 0x0040092c,
+ 0x00400930,
+ 0x00400934,
+ 0x00400938,
+ 0x0040093c,
+ 0x00400940,
+ 0x00400944,
+ 0x00400948,
+ 0x0040094c,
+ 0x00400950,
+ 0x00400954,
+ 0x00400958,
+ 0x0040095c,
+ 0x00400960,
+ 0x00400964,
+ 0x00400968,
+ 0x0040096c,
+ 0x00400970,
+ 0x00400974,
+ 0x00400978,
+ 0x0040097c,
+ 0x00400980,
+ 0x00400984,
+ 0x00400988,
+ 0x0040098c,
+ 0x00400990,
+ 0x00400994,
+ 0x00400998,
+ 0x0040099c,
+ 0x004009a0,
+ 0x004009a4,
+ 0x004009a8,
+ 0x004009ac,
+ 0x004009b0,
+ 0x004009b4,
+ 0x004009b8,
+ 0x004009bc,
+ 0x004009c0,
+ 0x004009c4,
+ 0x004009c8,
+ 0x004009cc,
+ 0x004009d0,
+ 0x004009d4,
+ 0x004009d8,
+ 0x004009dc,
+ 0x004009e0,
+ 0x004009e4,
+ 0x004009e8,
+ 0x004009ec,
+ 0x004009f0,
+ 0x004009f4,
+ 0x004009f8,
+ 0x004009fc,
+ NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ NV03_PGRAPH_MONO_COLOR0,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ 0x00400e70,
+ 0x00400e74,
+ 0x00400e78,
+ 0x00400e7c,
+ 0x00400e80,
+ 0x00400e84,
+ 0x00400e88,
+ 0x00400e8c,
+ 0x00400ea0,
+ 0x00400ea4,
+ 0x00400ea8,
+ 0x00400e90,
+ 0x00400e94,
+ 0x00400e98,
+ 0x00400e9c,
+ NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
+ NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
+ 0x00400f04,
+ 0x00400f24,
+ 0x00400f08,
+ 0x00400f28,
+ 0x00400f0c,
+ 0x00400f2c,
+ 0x00400f10,
+ 0x00400f30,
+ 0x00400f14,
+ 0x00400f34,
+ 0x00400f18,
+ 0x00400f38,
+ 0x00400f1c,
+ 0x00400f3c,
+ NV10_PGRAPH_XFMODE0,
+ NV10_PGRAPH_XFMODE1,
+ NV10_PGRAPH_GLOBALSTATE0,
+ NV10_PGRAPH_GLOBALSTATE1,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
+ NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ NV03_PGRAPH_ABS_UCLIP_XMIN,
+ NV03_PGRAPH_ABS_UCLIP_XMAX,
+ NV03_PGRAPH_ABS_UCLIP_YMIN,
+ NV03_PGRAPH_ABS_UCLIP_YMAX,
+ 0x00400550,
+ 0x00400558,
+ 0x00400554,
+ 0x0040055c,
+ NV03_PGRAPH_ABS_UCLIPA_XMIN,
+ NV03_PGRAPH_ABS_UCLIPA_XMAX,
+ NV03_PGRAPH_ABS_UCLIPA_YMIN,
+ NV03_PGRAPH_ABS_UCLIPA_YMAX,
+ NV03_PGRAPH_ABS_ICLIP_XMAX,
+ NV03_PGRAPH_ABS_ICLIP_YMAX,
+ NV03_PGRAPH_XY_LOGIC_MISC0,
+ NV03_PGRAPH_XY_LOGIC_MISC1,
+ NV03_PGRAPH_XY_LOGIC_MISC2,
+ NV03_PGRAPH_XY_LOGIC_MISC3,
+ NV03_PGRAPH_CLIPX_0,
+ NV03_PGRAPH_CLIPX_1,
+ NV03_PGRAPH_CLIPY_0,
+ NV03_PGRAPH_CLIPY_1,
+ NV10_PGRAPH_COMBINER0_IN_ALPHA,
+ NV10_PGRAPH_COMBINER1_IN_ALPHA,
+ NV10_PGRAPH_COMBINER0_IN_RGB,
+ NV10_PGRAPH_COMBINER1_IN_RGB,
+ NV10_PGRAPH_COMBINER_COLOR0,
+ NV10_PGRAPH_COMBINER_COLOR1,
+ NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+ NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+ NV10_PGRAPH_COMBINER0_OUT_RGB,
+ NV10_PGRAPH_COMBINER1_OUT_RGB,
+ NV10_PGRAPH_COMBINER_FINAL0,
+ NV10_PGRAPH_COMBINER_FINAL1,
+ 0x00400e00,
+ 0x00400e04,
+ 0x00400e08,
+ 0x00400e0c,
+ 0x00400e10,
+ 0x00400e14,
+ 0x00400e18,
+ 0x00400e1c,
+ 0x00400e20,
+ 0x00400e24,
+ 0x00400e28,
+ 0x00400e2c,
+ 0x00400e30,
+ 0x00400e34,
+ 0x00400e38,
+ 0x00400e3c,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV10_PGRAPH_DIMX_TEXTURE,
+ NV10_PGRAPH_WDIMX_TEXTURE,
+ NV10_PGRAPH_DVD_COLORFMT,
+ NV10_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ NV03_PGRAPH_X_MISC,
+ NV03_PGRAPH_Y_MISC,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs[] = {
+ NV10_PGRAPH_DEBUG_4,
+ 0x004006b0,
+ 0x00400eac,
+ 0x00400eb0,
+ 0x00400eb4,
+ 0x00400eb8,
+ 0x00400ebc,
+ 0x00400ec0,
+ 0x00400ec4,
+ 0x00400ec8,
+ 0x00400ecc,
+ 0x00400ed0,
+ 0x00400ed4,
+ 0x00400ed8,
+ 0x00400edc,
+ 0x00400ee0,
+ 0x00400a00,
+ 0x00400a04,
+};
+
+struct graph_state {
+ int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
+ int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+ struct pipe_state pipe_state;
+};
+
+static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ int i;
+#define PIPE_SAVE(addr) \
+ do { \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
+ fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+ PIPE_SAVE(0x4400);
+ PIPE_SAVE(0x0200);
+ PIPE_SAVE(0x6400);
+ PIPE_SAVE(0x6800);
+ PIPE_SAVE(0x6c00);
+ PIPE_SAVE(0x7000);
+ PIPE_SAVE(0x7400);
+ PIPE_SAVE(0x7800);
+ PIPE_SAVE(0x0040);
+ PIPE_SAVE(0x0000);
+
+#undef PIPE_SAVE
+}
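+
+/* A sketch of how the PIPE_SAVE()/PIPE_RESTORE() macros resolve, assuming
+ * a pipe_state declared roughly as follows (the real definition lives in
+ * nouveau_drv.h; the array sizes here are illustrative only):
+ *
+ *	struct pipe_state {
+ *		uint32_t pipe_0x0200[0x0c0/4];
+ *		uint32_t pipe_0x4400[0x080/4];
+ *		...
+ *	};
+ *
+ * PIPE_SAVE(0x4400) token-pastes into fifo_pipe_state->pipe_0x4400: it
+ * points NV10_PGRAPH_PIPE_ADDRESS at 0x4400, then reads that member's
+ * worth of words back through the NV10_PGRAPH_PIPE_DATA port.
+ */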
+
+static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ int i;
+ uint32_t xfmode0, xfmode1;
+#define PIPE_RESTORE(addr) \
+ do { \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
+ } while (0)
+
+ nouveau_wait_for_idle(dev);
+ /* XXX check haiku comments */
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(0x0200);
+ nouveau_wait_for_idle(dev);
+
+ /* restore XFMODE */
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(0x6400);
+ PIPE_RESTORE(0x6800);
+ PIPE_RESTORE(0x6c00);
+ PIPE_RESTORE(0x7000);
+ PIPE_RESTORE(0x7400);
+ PIPE_RESTORE(0x7800);
+ PIPE_RESTORE(0x4400);
+ PIPE_RESTORE(0x0000);
+ PIPE_RESTORE(0x0040);
+ nouveau_wait_for_idle(dev);
+
+#undef PIPE_RESTORE
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ uint32_t *fifo_pipe_state_addr;
+ int i;
+#define PIPE_INIT(addr) \
+ do { \
+ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+ } while (0)
+#define PIPE_INIT_END(addr) \
+ do { \
+ uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
+ ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
+ if (fifo_pipe_state_addr != __end_addr) \
+ NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
+ addr, fifo_pipe_state_addr, __end_addr); \
+ } while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+
+ PIPE_INIT(0x0200);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0200);
+
+ PIPE_INIT(0x6400);
+ for (i = 0; i < 211; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ PIPE_INIT_END(0x6400);
+
+ PIPE_INIT(0x6800);
+ for (i = 0; i < 162; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ for (i = 0; i < 25; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6800);
+
+ PIPE_INIT(0x6c00);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0xbf800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6c00);
+
+ PIPE_INIT(0x7000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ for (i = 0; i < 35; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7000);
+
+ PIPE_INIT(0x7400);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7400);
+
+ PIPE_INIT(0x7800);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7800);
+
+ PIPE_INIT(0x4400);
+ for (i = 0; i < 32; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x4400);
+
+ PIPE_INIT(0x0000);
+ for (i = 0; i < 16; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0000);
+
+ PIPE_INIT(0x0040);
+ for (i = 0; i < 4; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
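+
+/* PIPE_INIT_END() above is a run-time sanity check rather than hardware
+ * setup: it compares the write cursor against the end of the array, so a
+ * miscounted initialization loop is reported as "incomplete pipe init"
+ * instead of silently overflowing into the neighbouring member.
+ */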
+
+static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
+ if (nv10_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ NV_ERROR(dev, "unknown offset nv10_ctx_regs %d\n", reg);
+ return -1;
+}
+
+static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
+ if (nv17_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ NV_ERROR(dev, "unknown offset nv17_ctx_regs %d\n", reg);
+ return -1;
+}
+
+int nv10_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ uint32_t tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+ nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
+ if (dev_priv->chipset >= 0x17) {
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+ nv_wr32(dev, nv17_graph_ctx_regs[i],
+ pgraph_ctx->nv17[i]);
+ }
+
+ nv10_graph_load_pipe(chan);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
+ tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
+ return 0;
+}
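+
+/* The channel id lives in bits 31:24 of NV10_PGRAPH_CTX_USER, hence the
+ * read-modify-write above. nv10_graph_unload_context() below parks that
+ * field at the last channel index (pfifo->channels - 1) once no context
+ * is resident.
+ */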
+
+int
+nv10_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ struct graph_state *ctx;
+ uint32_t tmp;
+ int i;
+
+ chan = pgraph->channel(dev);
+ if (!chan)
+ return 0;
+ ctx = chan->pgraph_ctx;
+
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+ ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
+
+ if (dev_priv->chipset >= 0x17) {
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+ ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
+ }
+
+ nv10_graph_save_pipe(chan);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (pfifo->channels - 1) << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+void
+nv10_graph_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_channel *chan = NULL;
+ int chid;
+
+ pgraph->fifo_access(dev, false);
+ nouveau_wait_for_idle(dev);
+
+ /* If previous context is valid, we need to save it */
+ nv10_graph_unload_context(dev);
+
+ /* Load context for next channel */
+ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ chan = dev_priv->fifos[chid];
+ if (chan)
+ nv10_graph_load_context(chan);
+
+ pgraph->fifo_access(dev, true);
+}
+
+#define NV_WRITE_CTX(reg, val) do { \
+ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset >= 0) \
+ pgraph_ctx->nv10[offset] = val; \
+ } while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset >= 0) \
+ pgraph_ctx->nv17[offset] = val; \
+ } while (0)
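+
+/* Neither macro touches the hardware; they update the software context
+ * copy that nv10_graph_load_context() writes out on the next switch. The
+ * *_find_offset() helpers return -1 on failure, which is why the checks
+ * above must accept offset 0 (NV10_PGRAPH_DEBUG_4, for instance, is the
+ * first entry of nv17_graph_ctx_regs[]). A use taken from below:
+ *
+ *	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+ *
+ * stores the value at NV10_PGRAPH_CTX_USER's index in nv10_graph_ctx_regs[],
+ * so the load loop writes it back to that register.
+ */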
+
+struct nouveau_channel *
+nv10_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid = dev_priv->engine.fifo.channels;
+
+ if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
+ chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
+
+ if (chid >= dev_priv->engine.fifo.channels)
+ return NULL;
+
+ return dev_priv->fifos[chid];
+}
+
+int nv10_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx;
+
+ NV_DEBUG(dev, "nv10_graph_create_context %d\n", chan->id);
+
+ chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
+ GFP_KERNEL);
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+ NV_WRITE_CTX(0x00400e88, 0x08000000);
+ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+ NV_WRITE_CTX(0x00400e10, 0x00001000);
+ NV_WRITE_CTX(0x00400e14, 0x00001000);
+ NV_WRITE_CTX(0x00400e30, 0x00080008);
+ NV_WRITE_CTX(0x00400e34, 0x00080008);
+ if (dev_priv->chipset >= 0x17) {
+ /* is this really needed? */
+ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
+ NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
+ NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+ NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+ }
+ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+
+ nv10_graph_create_pipe(chan);
+ return 0;
+}
+
+void nv10_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+
+ kfree(pgraph_ctx);
+ chan->pgraph_ctx = NULL;
+}
+
+int nv10_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+ int i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
+ (1<<29) |
+ (1<<31));
+ if (dev_priv->chipset >= 0x17) {
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nv_wr32(dev, 0x004006b0, 0x40000020);
+ } else {
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ }
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, NV10_PGRAPH_TILE(i),
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
+ nv_rd32(dev, NV10_PFB_TSTATUS(i)));
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+
+ return 0;
+}
+
+void nv10_graph_takedown(struct drm_device *dev)
+{
+}
+
+struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x005f, false, NULL }, /* imageblit */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x0052, false, NULL }, /* swzsurf */
+ { 0x0093, false, NULL }, /* surf3d */
+ { 0x0094, false, NULL }, /* tex_tri */
+ { 0x0095, false, NULL }, /* multitex_tri */
+ { 0x0056, false, NULL }, /* celsius (nv10) */
+ { 0x0096, false, NULL }, /* celsius (nv11) */
+ { 0x0099, false, NULL }, /* celsius (nv17) */
+ {}
+};
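+
+/* Each entry above pairs a graphics object class id with a flag saying
+ * whether the class is emulated in software and a (here always NULL)
+ * table of per-method handlers; the empty entry terminates the list.
+ */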
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
new file mode 100644
index 00000000000..2e58c331e9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+static bool
+get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
+ uint32_t *mask)
+{
+ if (ent->line < 2) {
+ *reg = NV_PCRTC_GPIO;
+ *shift = ent->line * 16;
+ *mask = 0x11;
+
+ } else if (ent->line < 10) {
+ *reg = NV_PCRTC_GPIO_EXT;
+ *shift = (ent->line - 2) * 4;
+ *mask = 0x3;
+
+ } else if (ent->line < 14) {
+ *reg = NV_PCRTC_850;
+ *shift = (ent->line - 10) * 4;
+ *mask = 0x3;
+
+ } else {
+ return false;
+ }
+
+ return true;
+}
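+
+/* A worked example of the mapping above: GPIO line 5 falls in the second
+ * range, giving *reg = NV_PCRTC_GPIO_EXT, *shift = (5 - 2) * 4 = 12 and
+ * *mask = 0x3, i.e. the line is controlled by bits 13:12 of
+ * NV_PCRTC_GPIO_EXT.
+ */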
+
+int
+nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+ struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+ uint32_t reg, shift, mask, value;
+
+ if (!ent)
+ return -ENODEV;
+
+ if (!get_gpio_location(ent, &reg, &shift, &mask))
+ return -ENODEV;
+
+ value = NVReadCRTC(dev, 0, reg) >> shift;
+
+ return (ent->invert ? 1 : 0) ^ (value & 1);
+}
+
+int
+nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+{
+ struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+ uint32_t reg, shift, mask, value;
+
+ if (!ent)
+ return -ENODEV;
+
+ if (!get_gpio_location(ent, &reg, &shift, &mask))
+ return -ENODEV;
+
+ value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
+ mask = ~(mask << shift);
+
+ NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
+
+ return 0;
+}
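+
+/* nv17_gpio_set() is a plain read-modify-write. Spelled out for a line in
+ * the 2-9 range with shift 0 and invert == 0, it is equivalent to:
+ *
+ *	uint32_t old = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
+ *	NVWriteCRTC(dev, 0, NV_PCRTC_GPIO_EXT, (old & ~0x3) | (state ? 1 : 0));
+ */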
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
new file mode 100644
index 00000000000..46cfd9c6047
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nv17_tv.h"
+
+enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ uint32_t pin_mask)
+{
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+ tv_enc->pin_mask = (pin_mask >> 28) & 0xe;
+
+ switch (tv_enc->pin_mask) {
+ case 0x2:
+ case 0x4:
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+ break;
+ case 0xc:
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+ break;
+ case 0xe:
+ if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
+ else
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+ break;
+ default:
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+ break;
+ }
+
+ drm_connector_property_set_value(connector,
+ encoder->dev->mode_config.tv_subconnector_property,
+ tv_enc->subconnector);
+
+ return tv_enc->subconnector ? connector_status_connected :
+ connector_status_disconnected;
+}
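+
+/* The caller passes the raw load-detect sample in pin_mask; the shift and
+ * mask above turn bits 31:29 into a 3-bit pin mask in bits 3:1. A raw
+ * value of 0xc0000000, for example, becomes 0xc, which the switch
+ * classifies as S-Video.
+ */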
+
+static const struct {
+ int hdisplay;
+ int vdisplay;
+} modes[] = {
+ { 640, 400 },
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 720 },
+ { 1280, 1024 },
+ { 1920, 1080 }
+};
+
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ struct drm_display_mode *mode;
+ struct drm_display_mode *output_mode;
+ int n = 0;
+ int i;
+
+ if (tv_norm->kind != CTV_ENC_MODE) {
+ struct drm_display_mode *tv_mode;
+
+ for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ mode = drm_mode_duplicate(encoder->dev, tv_mode);
+
+ mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ mode->htotal / 1000 *
+ mode->vtotal / 1000;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ mode->clock *= 2;
+
+ if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+ mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ n++;
+ }
+ return n;
+ }
+
+ /* tv_norm->kind == CTV_ENC_MODE */
+ output_mode = &tv_norm->ctv_enc_mode.mode;
+ for (i = 0; i < ARRAY_SIZE(modes); i++) {
+ if (modes[i].hdisplay > output_mode->hdisplay ||
+ modes[i].vdisplay > output_mode->vdisplay)
+ continue;
+
+ if (modes[i].hdisplay == output_mode->hdisplay &&
+ modes[i].vdisplay == output_mode->vdisplay) {
+ mode = drm_mode_duplicate(encoder->dev, output_mode);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ } else {
+ mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
+ modes[i].vdisplay, 60, false,
+ output_mode->flags & DRM_MODE_FLAG_INTERLACE,
+ false);
+ }
+
+ /* CVT modes are sometimes unsuitable... */
+ if (output_mode->hdisplay <= 720
+ || output_mode->hdisplay >= 1920) {
+ mode->htotal = output_mode->htotal;
+ mode->hsync_start = (mode->hdisplay + (mode->htotal
+ - mode->hdisplay) * 9 / 10) & ~7;
+ mode->hsync_end = mode->hsync_start + 8;
+ }
+ if (output_mode->vdisplay >= 1024) {
+ mode->vtotal = output_mode->vtotal;
+ mode->vsync_start = output_mode->vsync_start;
+ mode->vsync_end = output_mode->vsync_end;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ drm_mode_probed_add(connector, mode);
+ n++;
+ }
+ return n;
+}
+
+static int nv17_tv_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ struct drm_display_mode *output_mode =
+ &tv_norm->ctv_enc_mode.mode;
+
+ if (mode->clock > 400000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > output_mode->hdisplay ||
+ mode->vdisplay > output_mode->vdisplay)
+ return MODE_BAD;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
+ (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return MODE_NO_INTERLACE;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ } else {
+ const int vsync_tolerance = 600;
+
+ if (mode->clock > 70000)
+ return MODE_CLOCK_HIGH;
+
+ if (abs(drm_mode_vrefresh(mode) * 1000 -
+ tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
+ return MODE_VSYNC;
+
+ /* The encoder takes care of the actual interlacing */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+ }
+
+ return MODE_OK;
+}
+
+static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (tv_norm->kind == CTV_ENC_MODE)
+ adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
+ else
+ adjusted_mode->clock = 90000;
+
+ return true;
+}
+
+static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (nouveau_encoder(encoder)->last_dpms == mode)
+ return;
+ nouveau_encoder(encoder)->last_dpms = mode;
+
+ NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nouveau_encoder(encoder)->dcb->index);
+
+ regs->ptv_200 &= ~1;
+
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ nv04_dfp_update_fp_control(encoder, mode);
+
+ } else {
+ nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ regs->ptv_200 |= 1;
+ }
+
+ nv_load_ptv(dev, regs, 200);
+
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+
+ nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv17_tv_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ int head = nouveau_crtc(encoder->crtc)->index;
+ uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
+ NV_CIO_CRE_LCD__INDEX];
+ uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder);
+ uint32_t dacclk;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_disable(dev, head);
+
+ /* Unbind any FP encoders from this head if we need the FP
+ * stuff enabled. */
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ struct drm_encoder *enc;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
+
+ if ((dcb->type == OUTPUT_TMDS ||
+ dcb->type == OUTPUT_LVDS) &&
+ !enc->crtc &&
+ nv04_dfp_get_bound_head(dev, dcb) == head) {
+ nv04_dfp_bind_head(dev, dcb, head ^ 1,
+ dev_priv->VBIOS.fp.dual_link);
+ }
+ }
+ }
+
+ /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+ * at LCD__INDEX, which we don't alter.
+ */
+ if (!(*cr_lcd & 0x44)) {
+ if (tv_norm->kind == CTV_ENC_MODE)
+ *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
+ else
+ *cr_lcd = 0;
+ }
+
+ /* Set the DACCLK register */
+ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
+
+ if (dev_priv->card_type == NV_40)
+ dacclk |= 0x1a << 16;
+
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ dacclk |= 0x20;
+
+ if (head)
+ dacclk |= 0x100;
+ else
+ dacclk &= ~0x100;
+
+ } else {
+ dacclk |= 0x10;
+ }
+
+ NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
+}
+
+static void nv17_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+ struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ int i;
+
+ regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
+ regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
+ regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
+ regs->tv_setup = 1;
+ regs->ramdac_8c0 = 0x0;
+
+ if (tv_norm->kind == TV_ENC_MODE) {
+ tv_regs->ptv_200 = 0x13111100;
+ if (head)
+ tv_regs->ptv_200 |= 0x10;
+
+ tv_regs->ptv_20c = 0x808010;
+ tv_regs->ptv_304 = 0x2d00000;
+ tv_regs->ptv_600 = 0x0;
+ tv_regs->ptv_60c = 0x0;
+ tv_regs->ptv_610 = 0x1e00000;
+
+ if (tv_norm->tv_enc_mode.vdisplay == 576) {
+ tv_regs->ptv_508 = 0x1200000;
+ tv_regs->ptv_614 = 0x33;
+
+ } else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+ tv_regs->ptv_508 = 0xf00000;
+ tv_regs->ptv_614 = 0x13;
+ }
+
+ if (dev_priv->card_type >= NV_30) {
+ tv_regs->ptv_500 = 0xe8e0;
+ tv_regs->ptv_504 = 0x1710;
+ tv_regs->ptv_604 = 0x0;
+ tv_regs->ptv_608 = 0x0;
+ } else {
+ if (tv_norm->tv_enc_mode.vdisplay == 576) {
+ tv_regs->ptv_604 = 0x20;
+ tv_regs->ptv_608 = 0x10;
+ tv_regs->ptv_500 = 0x19710;
+ tv_regs->ptv_504 = 0x68f0;
+
+ } else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+ tv_regs->ptv_604 = 0x10;
+ tv_regs->ptv_608 = 0x20;
+ tv_regs->ptv_500 = 0x4b90;
+ tv_regs->ptv_504 = 0x1b480;
+ }
+ }
+
+ for (i = 0; i < 0x40; i++)
+ tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
+
+ } else {
+ struct drm_display_mode *output_mode =
+ &tv_norm->ctv_enc_mode.mode;
+
+ /* The registers in PRAMDAC+0xc00 control some timings and CSC
+ * parameters for the CTV encoder (it's only used for "HD" TV
+ * modes; I don't have enough working to guess exactly what they
+ * mean). It's probably connected at the output of the FP
+ * encoder, but it also needs the analog encoder in its OR
+ * enabled and routed to the head it's using. It's enabled with
+ * the DACCLK register, bits [5:4].
+ */
+ for (i = 0; i < 38; i++)
+ regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
+
+ regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+ regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+ regs->fp_horiz_regs[FP_SYNC_START] =
+ output_mode->hsync_start - 1;
+ regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+ regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
+ max((output_mode->hdisplay - 600) / 40 - 1, 1);
+
+ regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+ regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+ regs->fp_vert_regs[FP_SYNC_START] =
+ output_mode->vsync_start - 1;
+ regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+ regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
+
+ regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+ NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+
+ if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+ if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+
+ regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+ NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+ NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+ NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+ NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+ regs->fp_debug_2 = 0;
+
+ regs->fp_margin_color = 0x801080;
+ }
+}
+
+static void nv17_tv_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
+ nv17_tv_update_rescaler(encoder);
+ nv17_tv_update_properties(encoder);
+ } else {
+ nv17_ctv_update_rescaler(encoder);
+ }
+
+ nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
+
+ /* This could use refinement for flatpanels, but it should work */
+ if (dev_priv->chipset < 0x44)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+ nv04_dac_output_offset(encoder),
+ 0xf0000000);
+ else
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+ nv04_dac_output_offset(encoder),
+ 0x00100000);
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(
+ &nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv17_tv_save(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+ nouveau_encoder(encoder)->restore.output =
+ NVReadRAMDAC(dev, 0,
+ NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder));
+
+ nv17_tv_state_save(dev, &tv_enc->saved_state);
+
+ tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
+}
+
+static void nv17_tv_restore(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder),
+ nouveau_encoder(encoder)->restore.output);
+
+ nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+}
+
+static int nv17_tv_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_mode_config *conf = &dev->mode_config;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
+ NUM_LD_TV_NORMS;
+ int i;
+
+ if (nouveau_tv_norm) {
+ for (i = 0; i < num_tv_norms; i++) {
+ if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
+ tv_enc->tv_norm = i;
+ break;
+ }
+ }
+
+ if (i == num_tv_norms)
+ NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
+ nouveau_tv_norm);
+ }
+
+ drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+
+ drm_connector_attach_property(connector,
+ conf->tv_select_subconnector_property,
+ tv_enc->select_subconnector);
+ drm_connector_attach_property(connector,
+ conf->tv_subconnector_property,
+ tv_enc->subconnector);
+ drm_connector_attach_property(connector,
+ conf->tv_mode_property,
+ tv_enc->tv_norm);
+ drm_connector_attach_property(connector,
+ conf->tv_flicker_reduction_property,
+ tv_enc->flicker);
+ drm_connector_attach_property(connector,
+ conf->tv_saturation_property,
+ tv_enc->saturation);
+ drm_connector_attach_property(connector,
+ conf->tv_hue_property,
+ tv_enc->hue);
+ drm_connector_attach_property(connector,
+ conf->tv_overscan_property,
+ tv_enc->overscan);
+
+ return 0;
+}
+
+static int nv17_tv_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_mode_config *conf = &encoder->dev->mode_config;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ bool modes_changed = false;
+
+ if (property == conf->tv_overscan_property) {
+ tv_enc->overscan = val;
+ if (encoder->crtc) {
+ if (tv_norm->kind == CTV_ENC_MODE)
+ nv17_ctv_update_rescaler(encoder);
+ else
+ nv17_tv_update_rescaler(encoder);
+ }
+
+ } else if (property == conf->tv_saturation_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->saturation = val;
+ nv17_tv_update_properties(encoder);
+
+ } else if (property == conf->tv_hue_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->hue = val;
+ nv17_tv_update_properties(encoder);
+
+ } else if (property == conf->tv_flicker_reduction_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->flicker = val;
+ if (encoder->crtc)
+ nv17_tv_update_rescaler(encoder);
+
+ } else if (property == conf->tv_mode_property) {
+ if (connector->dpms != DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ tv_enc->tv_norm = val;
+
+ modes_changed = true;
+
+ } else if (property == conf->tv_select_subconnector_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->select_subconnector = val;
+ nv17_tv_update_properties(encoder);
+
+ } else {
+ return -EINVAL;
+ }
+
+ if (modes_changed) {
+ drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+ /* Disable the crtc to ensure a full modeset is
+ * performed whenever it's turned on again. */
+ if (crtc) {
+ struct drm_mode_set modeset = {
+ .crtc = crtc,
+ };
+
+ crtc->funcs->set_config(&modeset);
+ }
+ }
+
+ return 0;
+}
+
+static void nv17_tv_destroy(struct drm_encoder *encoder)
+{
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+ NV_DEBUG(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(tv_enc);
+}
+
+static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
+ .dpms = nv17_tv_dpms,
+ .save = nv17_tv_save,
+ .restore = nv17_tv_restore,
+ .mode_fixup = nv17_tv_mode_fixup,
+ .prepare = nv17_tv_prepare,
+ .commit = nv17_tv_commit,
+ .mode_set = nv17_tv_mode_set,
+ .detect = nv17_dac_detect,
+};
+
+static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+ .get_modes = nv17_tv_get_modes,
+ .mode_valid = nv17_tv_mode_valid,
+ .create_resources = nv17_tv_create_resources,
+ .set_property = nv17_tv_set_property,
+};
+
+static struct drm_encoder_funcs nv17_tv_funcs = {
+ .destroy = nv17_tv_destroy,
+};
+
+int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct drm_encoder *encoder;
+ struct nv17_tv_encoder *tv_enc = NULL;
+
+ tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
+ if (!tv_enc)
+ return -ENOMEM;
+
+ tv_enc->overscan = 50;
+ tv_enc->flicker = 50;
+ tv_enc->saturation = 50;
+ tv_enc->hue = 0;
+ tv_enc->tv_norm = TV_NORM_PAL;
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+ tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+ tv_enc->pin_mask = 0;
+
+ encoder = to_drm_encoder(&tv_enc->base);
+
+ tv_enc->base.dcb = entry;
+ tv_enc->base.or = ffs(entry->or) - 1;
+
+ drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
+ to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
new file mode 100644
index 00000000000..c00977cedab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV17_TV_H__
+#define __NV17_TV_H__
+
+struct nv17_tv_state {
+ uint8_t tv_enc[0x40];
+
+ uint32_t hfilter[4][7];
+ uint32_t hfilter2[4][7];
+ uint32_t vfilter[4][7];
+
+ uint32_t ptv_200;
+ uint32_t ptv_204;
+ uint32_t ptv_208;
+ uint32_t ptv_20c;
+ uint32_t ptv_304;
+ uint32_t ptv_500;
+ uint32_t ptv_504;
+ uint32_t ptv_508;
+ uint32_t ptv_600;
+ uint32_t ptv_604;
+ uint32_t ptv_608;
+ uint32_t ptv_60c;
+ uint32_t ptv_610;
+ uint32_t ptv_614;
+};
+
+enum nv17_tv_norm {
+ TV_NORM_PAL,
+ TV_NORM_PAL_M,
+ TV_NORM_PAL_N,
+ TV_NORM_PAL_NC,
+ TV_NORM_NTSC_M,
+ TV_NORM_NTSC_J,
+ NUM_LD_TV_NORMS,
+ TV_NORM_HD480I = NUM_LD_TV_NORMS,
+ TV_NORM_HD480P,
+ TV_NORM_HD576I,
+ TV_NORM_HD576P,
+ TV_NORM_HD720P,
+ TV_NORM_HD1080I,
+ NUM_TV_NORMS
+};
+
+struct nv17_tv_encoder {
+ struct nouveau_encoder base;
+
+ struct nv17_tv_state state;
+ struct nv17_tv_state saved_state;
+
+ int overscan;
+ int flicker;
+ int saturation;
+ int hue;
+ enum nv17_tv_norm tv_norm;
+ int subconnector;
+ int select_subconnector;
+ uint32_t pin_mask;
+};
+#define to_tv_enc(x) container_of(nouveau_encoder(x), \
+ struct nv17_tv_encoder, base)
+
+extern char *nv17_tv_norm_names[NUM_TV_NORMS];
+
+extern struct nv17_tv_norm_params {
+ enum {
+ TV_ENC_MODE,
+ CTV_ENC_MODE,
+ } kind;
+
+ union {
+ struct {
+ int hdisplay;
+ int vdisplay;
+ int vrefresh; /* mHz */
+
+ uint8_t tv_enc[0x40];
+ } tv_enc_mode;
+
+ struct {
+ struct drm_display_mode mode;
+
+ uint32_t ctv_regs[38];
+ } ctv_enc_mode;
+ };
+
+} nv17_tv_norms[NUM_TV_NORMS];
+#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
+
+extern struct drm_display_mode nv17_tv_modes[];
+
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+ return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
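+
+/* interpolate() is piecewise linear through (0, y0), (50, y1), (100, y2);
+ * e.g. with the (0x100, 0xe1, 0xc1) endpoints used for the overscan curve
+ * in nv17_tv_modes.c, x = 0 yields 0x100, x = 50 yields 0xe1 and x = 100
+ * yields 0xc1.
+ */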
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_update_properties(struct drm_encoder *encoder);
+void nv17_tv_update_rescaler(struct drm_encoder *encoder);
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
+
+/* TV hardware access functions */
+
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
+{
+ return nv_rd32(dev, reg);
+}
+
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
+{
+ nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+ nv_write_ptv(dev, NV_PTV_TV_DATA, val);
+}
+
+static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
+{
+ nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+ return nv_read_ptv(dev, NV_PTV_TV_DATA);
+}
+
+#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
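+
+/* These helpers paste the register suffix into both the MMIO offset and
+ * the struct member name; nv_load_ptv(dev, state, 200), for instance,
+ * expands to nv_write_ptv(dev, NV_PTV_OFFSET + 0x200, state->ptv_200).
+ */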
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
new file mode 100644
index 00000000000..d64683d97e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nv17_tv.h"
+
+char *nv17_tv_norm_names[NUM_TV_NORMS] = {
+ [TV_NORM_PAL] = "PAL",
+ [TV_NORM_PAL_M] = "PAL-M",
+ [TV_NORM_PAL_N] = "PAL-N",
+ [TV_NORM_PAL_NC] = "PAL-Nc",
+ [TV_NORM_NTSC_M] = "NTSC-M",
+ [TV_NORM_NTSC_J] = "NTSC-J",
+ [TV_NORM_HD480I] = "hd480i",
+ [TV_NORM_HD480P] = "hd480p",
+ [TV_NORM_HD576I] = "hd576i",
+ [TV_NORM_HD576P] = "hd576p",
+ [TV_NORM_HD720P] = "hd720p",
+ [TV_NORM_HD1080I] = "hd1080i"
+};
+
+/* TV standard specific parameters */
+
+struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
+ [TV_NORM_PAL] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+ 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+ 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_PAL_M] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_PAL_N] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_PAL_NC] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+ 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+ 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_NTSC_M] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_NTSC_J] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_HD480I] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_HD576I] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+ 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+ 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+ } } } },
+
+
+ [TV_NORM_HD480P] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
+ 720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+ 0x354003a, 0x40000, 0x6f0344, 0x18100000,
+ 0x10160004, 0x10060005, 0x1006000c, 0x10060020,
+ 0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
+ 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
+ 0x10000fff, 0x10000fff, 0x10000fff, 0x70,
+ 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+ 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+ } } } },
+
+ [TV_NORM_HD576P] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
+ 720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+ 0x354003a, 0x40000, 0x6f0344, 0x18100000,
+ 0x10060001, 0x10060009, 0x10060026, 0x10060027,
+ 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
+ 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
+ 0x10000fff, 0x10000fff, 0x10000fff, 0x69,
+ 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+ 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+ } } } },
+
+ [TV_NORM_HD720P] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
+ 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
+ 0x66b0021, 0x6004a, 0x1210626, 0x8170000,
+ 0x70004, 0x70016, 0x70017, 0x40f0018,
+ 0x702e8, 0x81702ed, 0xfff, 0xfff,
+ 0xfff, 0xfff, 0xfff, 0xfff,
+ 0xfff, 0xfff, 0xfff, 0x0,
+ 0x2e40001, 0x58, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
+ 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_HD1080I] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
+ 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+ | DRM_MODE_FLAG_INTERLACE) },
+ .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
+ 0x8940028, 0x60054, 0xe80870, 0xbf70000,
+ 0xbc70004, 0x70005, 0x70012, 0x70013,
+ 0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
+ 0x1c70237, 0x70238, 0x70244, 0x70245,
+ 0x40f0246, 0x70462, 0x1f70464, 0x0,
+ 0x2e40001, 0x58, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
+ 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
+ } } } }
+};
+
+/*
+ * The following is some guesswork on how the TV encoder flicker
+ * filter/rescaler works:
+ *
+ * It seems to use some sort of resampling filter. It is controlled
+ * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which
+ * drive the horizontal and vertical stage respectively. There is
+ * also NV_PTV_HFILTER2, which the blob fills identically to
+ * NV_PTV_HFILTER, but it seems to do nothing. A rough guess is that
+ * it could be used to control the filtering of each interlaced
+ * field independently, but I don't know how it would be enabled.
+ * The whole filtering process seems to be disabled with bits 26:27
+ * of PTV_200, but we aren't doing that.
+ *
+ * The layout of both register sets is the same:
+ *
+ * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
+ * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
+ *
+ * Each coefficient is stored in bits [31],[15:9] in two's complement
+ * format. They seem to be some kind of weights used in a low-pass
+ * filter. Both A and B coefficients are applied to the 14 nearest
+ * samples on each side (listed from nearest to farthest; they
+ * roughly cover 2 framebuffer pixels on each side). They are
+ * probably multiplied with some more hardwired weights before being
+ * used: B coefficients are applied the same on both sides, while
+ * A coefficients are inverted before being applied to the opposite
+ * side.
+ *
+ * After all the hassle, I got the following formula by empirical
+ * means...
+ */
+
+#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
+
+#define id1 (1LL << 8)
+#define id2 (1LL << 16)
+#define id3 (1LL << 24)
+#define id4 (1LL << 32)
+#define id5 (1LL << 48)
+
+static struct filter_params {
+ int64_t k1;
+ int64_t ki;
+ int64_t ki2;
+ int64_t ki3;
+ int64_t kr;
+ int64_t kir;
+ int64_t ki2r;
+ int64_t ki3r;
+ int64_t kf;
+ int64_t kif;
+ int64_t ki2f;
+ int64_t ki3f;
+ int64_t krf;
+ int64_t kirf;
+ int64_t ki2rf;
+ int64_t ki3rf;
+} fparams[2][4] = {
+ /* Horizontal filter parameters */
+ {
+ {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
+ 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
+ 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
+ -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
+ {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
+ 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
+ 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
+ -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
+ {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
+ 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
+ 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
+ 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
+ {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
+ -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
+ -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
+ 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
+ },
+
+ /* Vertical filter parameters */
+ {
+ {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
+ -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
+ -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
+ 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
+ {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
+ 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
+ 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
+ -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
+ {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
+ 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
+ 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
+ -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
+ {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
+ 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.013110 * id4,
+ 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
+ -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
+ }
+};
+
+static void tv_setup_filter(struct drm_encoder *encoder)
+{
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ struct drm_display_mode *mode = &encoder->crtc->mode;
+ uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
+ &tv_enc->state.vfilter};
+ int i, j, k;
+ int32_t overscan = calc_overscan(tv_enc->overscan);
+ int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
+ uint64_t rs[] = {mode->hdisplay * id3,
+ mode->vdisplay * id3};
+
+ do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
+ do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
+
+ for (k = 0; k < 2; k++) {
+ rs[k] = max((int64_t)rs[k], id2);
+
+ for (j = 0; j < 4; j++) {
+ struct filter_params *p = &fparams[k][j];
+
+ for (i = 0; i < 7; i++) {
+ int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
+ + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
+ + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
+ + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
+
+ (*filters[k])[j][i] = ((c + id5/2) >> 39) & (0x1 << 31 | 0x7f << 9);
+ }
+ }
+ }
+}
+
+/* Hardware state saving/restoring */
+
+static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+{
+ int i, j;
+ uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 7; j++)
+ regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
+ }
+}
+
+static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+{
+ int i, j;
+ uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 7; j++)
+ nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
+ }
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
+{
+ int i;
+
+ for (i = 0; i < 0x40; i++)
+ state->tv_enc[i] = nv_read_tv_enc(dev, i);
+
+ tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
+ tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+ tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+ nv_save_ptv(dev, state, 200);
+ nv_save_ptv(dev, state, 204);
+ nv_save_ptv(dev, state, 208);
+ nv_save_ptv(dev, state, 20c);
+ nv_save_ptv(dev, state, 304);
+ nv_save_ptv(dev, state, 500);
+ nv_save_ptv(dev, state, 504);
+ nv_save_ptv(dev, state, 508);
+ nv_save_ptv(dev, state, 600);
+ nv_save_ptv(dev, state, 604);
+ nv_save_ptv(dev, state, 608);
+ nv_save_ptv(dev, state, 60c);
+ nv_save_ptv(dev, state, 610);
+ nv_save_ptv(dev, state, 614);
+}
+
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
+{
+ int i;
+
+ for (i = 0; i < 0x40; i++)
+ nv_write_tv_enc(dev, i, state->tv_enc[i]);
+
+ tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
+ tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+ tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+ nv_load_ptv(dev, state, 200);
+ nv_load_ptv(dev, state, 204);
+ nv_load_ptv(dev, state, 208);
+ nv_load_ptv(dev, state, 20c);
+ nv_load_ptv(dev, state, 304);
+ nv_load_ptv(dev, state, 500);
+ nv_load_ptv(dev, state, 504);
+ nv_load_ptv(dev, state, 508);
+ nv_load_ptv(dev, state, 600);
+ nv_load_ptv(dev, state, 604);
+ nv_load_ptv(dev, state, 608);
+ nv_load_ptv(dev, state, 60c);
+ nv_load_ptv(dev, state, 610);
+ nv_load_ptv(dev, state, 614);
+
+ /* This is required for some settings to kick in. */
+ nv_write_tv_enc(dev, 0x3e, 1);
+ nv_write_tv_enc(dev, 0x3e, 0);
+}
+
+/* Timings similar to the ones the blob sets */
+
+struct drm_display_mode nv17_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
+ 320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+ | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
+ 320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+ | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
+ 400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+ | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
+ 640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
+ 720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
+ 720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
+ 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
+ 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ {}
+};
+
+void nv17_tv_update_properties(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_state *regs = &tv_enc->state;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ int subconnector = tv_enc->select_subconnector ?
+ tv_enc->select_subconnector :
+ tv_enc->subconnector;
+
+ switch (subconnector) {
+ case DRM_MODE_SUBCONNECTOR_Composite:
+ {
+ regs->ptv_204 = 0x2;
+
+ /* The composite connector may be found on either pin. */
+ if (tv_enc->pin_mask & 0x4)
+ regs->ptv_204 |= 0x010000;
+ else if (tv_enc->pin_mask & 0x2)
+ regs->ptv_204 |= 0x100000;
+ else
+ regs->ptv_204 |= 0x110000;
+
+ regs->tv_enc[0x7] = 0x10;
+ break;
+ }
+ case DRM_MODE_SUBCONNECTOR_SVIDEO:
+ regs->ptv_204 = 0x11012;
+ regs->tv_enc[0x7] = 0x18;
+ break;
+
+ case DRM_MODE_SUBCONNECTOR_Component:
+ regs->ptv_204 = 0x111333;
+ regs->tv_enc[0x7] = 0x14;
+ break;
+
+ case DRM_MODE_SUBCONNECTOR_SCART:
+ regs->ptv_204 = 0x111012;
+ regs->tv_enc[0x7] = 0x18;
+ break;
+ }
+
+ regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
+ tv_enc->saturation);
+ regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
+ tv_enc->saturation);
+ regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
+
+ nv_load_ptv(dev, regs, 204);
+ nv_load_tv_enc(dev, regs, 7);
+ nv_load_tv_enc(dev, regs, 20);
+ nv_load_tv_enc(dev, regs, 22);
+ nv_load_tv_enc(dev, regs, 25);
+}
+
+void nv17_tv_update_rescaler(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_state *regs = &tv_enc->state;
+
+ regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
+
+ tv_setup_filter(encoder);
+
+ nv_load_ptv(dev, regs, 208);
+ tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
+ tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
+ tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
+}
+
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+ struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
+ struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
+ int overscan, hmargin, vmargin, hratio, vratio;
+
+ /* The rescaler doesn't do the right thing for interlaced modes. */
+ if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ overscan = 100;
+ else
+ overscan = tv_enc->overscan;
+
+ hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
+ vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
+
+ hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
+ overscan);
+ vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
+ overscan);
+
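+ /* hratio/vratio hold the crtc-to-active-output size ratio in units
+ * of 1/0x800; vratio is additionally rounded down to a multiple of
+ * 4. */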
+ hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
+ vratio = (crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin)) & ~3;
+
+ regs->fp_horiz_regs[FP_VALID_START] = hmargin;
+ regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
+ regs->fp_vert_regs[FP_VALID_START] = vmargin;
+ regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
+
+ regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+ XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
+ NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+ XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
+ regs->fp_horiz_regs[FP_VALID_START]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
+ regs->fp_horiz_regs[FP_VALID_END]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
+ regs->fp_vert_regs[FP_VALID_START]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
+ regs->fp_vert_regs[FP_VALID_END]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 00000000000..18ba74f1970
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,780 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/*
+ * NV20
+ * -----
+ * There are 3 families:
+ * NV20 is 0x10de:0x020*
+ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
+ * NV2A is 0x10de:0x02A0
+ *
+ * NV30
+ * -----
+ * There are 3 families:
+ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
+ * NV34 is 0x10de:0x032*
+ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
+ *
+ * Not seen in the wild, no dumps (probably NV35):
+ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
+ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
+ *
+ */
+
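+/* Context image sizes, in bytes (the NV2x figures are 32-bit word
+ * counts times four). */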
+#define NV20_GRCTX_SIZE (3580*4)
+#define NV25_GRCTX_SIZE (3529*4)
+#define NV2A_GRCTX_SIZE (3500*4)
+
+#define NV30_31_GRCTX_SIZE (24392)
+#define NV34_GRCTX_SIZE (18140)
+#define NV35_36_GRCTX_SIZE (22396)
+
+static void
+nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
+ nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ for (i = 0x1c1c; i <= 0x248c; i += 16) {
+ nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+ nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+ nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ }
+ nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
+ nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
+ nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
+ for (i = 0x355c; i <= 0x3578; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
+ for (i = 0x0510; i <= 0x051c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x0530; i <= 0x053c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080000);
+ for (i = 0x0548; i <= 0x0554; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0558; i <= 0x0564; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x000105b8);
+ for (i = 0x0568; i <= 0x0574; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ for (i = 0x0598; i <= 0x05d4; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
+ nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
+ nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
+ nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
+ for (i = 0x1b04; i <= 0x2374; i += 16) {
+ nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+ nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+ nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ }
+ nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
+ nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
+ nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
+ for (i = 0x3484; i <= 0x34a0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
+ nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
+ nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+ nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+ nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ }
+ nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
+ nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
+ nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
+ for (i = 0x341c; i <= 0x3438; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
+ nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
+ for (i = 0x04e0; i < 0x04e8; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
+ for (i = 0x0508; i < 0x0548; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
+ nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
+ nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
+ for (i = 0x0600; i < 0x0640; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00010588);
+ for (i = 0x0640; i < 0x0680; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x06c0; i < 0x0700; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ for (i = 0x0700; i < 0x0740; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0740; i < 0x0780; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
+ for (i = 0x0864; i < 0x0874; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00040004);
+ for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+ nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+ nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+ nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ }
+ for (i = 0x30b8; i < 0x30c8; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0000ffff);
+ nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
+}
+
+static void
+nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
+ nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
+ nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
+ for (i = 0x04d4; i < 0x04dc; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
+ for (i = 0x04fc; i < 0x053c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
+ nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
+ nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
+ for (i = 0x05f0; i < 0x0630; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00010588);
+ for (i = 0x0630; i < 0x0670; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x06b0; i < 0x06f0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ for (i = 0x06f0; i < 0x0730; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0730; i < 0x0770; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
+ for (i = 0x0858; i < 0x0868; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00040004);
+ for (i = 0x15ac; i <= 0x271c ; i += 16) {
+ nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+ nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+ nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ }
+ for (i = 0x274c; i < 0x275c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0000ffff);
+ nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
+}
+
+static void
+nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
+ nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
+ for (i = 0x04dc; i < 0x04e4; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
+ for (i = 0x0504; i < 0x0544; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
+ nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
+ nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
+ for (i = 0x0604; i < 0x0644; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00010588);
+ for (i = 0x0644; i < 0x0684; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x06c4; i < 0x0704; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ for (i = 0x0704; i < 0x0744; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0744; i < 0x0784; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
+ for (i = 0x0868; i < 0x0878; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00040004);
+ for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+ nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+ nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+ nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ }
+ for (i = 0x30bc; i < 0x30cc; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0000ffff);
+ nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
+}
+
+int
+nv20_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
+ unsigned int ctx_size;
+ unsigned int idoffs = 0x28/4;
+ int ret;
+
+ switch (dev_priv->chipset) {
+ case 0x20:
+ ctx_size = NV20_GRCTX_SIZE;
+ ctx_init = nv20_graph_context_init;
+ idoffs = 0;
+ break;
+ case 0x25:
+ case 0x28:
+ ctx_size = NV25_GRCTX_SIZE;
+ ctx_init = nv25_graph_context_init;
+ break;
+ case 0x2a:
+ ctx_size = NV2A_GRCTX_SIZE;
+ ctx_init = nv2a_graph_context_init;
+ idoffs = 0;
+ break;
+ case 0x30:
+ case 0x31:
+ ctx_size = NV30_31_GRCTX_SIZE;
+ ctx_init = nv30_31_graph_context_init;
+ break;
+ case 0x34:
+ ctx_size = NV34_GRCTX_SIZE;
+ ctx_init = nv34_graph_context_init;
+ break;
+ case 0x35:
+ case 0x36:
+ ctx_size = NV35_36_GRCTX_SIZE;
+ ctx_init = nv35_36_graph_context_init;
+ break;
+ default:
+ NV_ERROR(dev, "Please contact the devs if you want your NV%x"
+ " card to work\n", dev_priv->chipset);
+ return -ENOSYS;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
+ if (ret)
+ return ret;
+
+ /* Initialise default context values */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ ctx_init(dev, chan->ramin_grctx->gpuobj);
+
+ /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
+ nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
+ (chan->id << 24) | 0x1); /* CTX_USER */
+
+ nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
+
+ dev_priv->engine.instmem.finish_access(dev);
+ return 0;
+}
+
+void
+nv20_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (chan->ramin_grctx)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
+ dev_priv->engine.instmem.finish_access(dev);
+}
+
+int
+nv20_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+
+ nouveau_wait_for_idle(dev);
+ return 0;
+}
+
+int
+nv20_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ uint32_t inst, tmp;
+
+ chan = pgraph->channel(dev);
+ if (!chan)
+ return 0;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (pfifo->channels - 1) << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+static void
+nv20_graph_rdi(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, writecount = 32;
+ uint32_t rdi_index = 0x2c80000;
+
+ if (dev_priv->chipset == 0x20) {
+ rdi_index = 0x3d0000;
+ writecount = 15;
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
+ for (i = 0; i < writecount; i++)
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+
+ nouveau_wait_for_idle(dev);
+}
+
+int
+nv20_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
+ uint32_t tmp, vramsz;
+ int ret, i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
+
+ if (!dev_priv->ctx_table) {
+ /* Create Context Pointer Table */
+ dev_priv->ctx_table_size = 32 * 4;
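+ /* one 32-bit context pointer per channel */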
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table);
+ if (ret)
+ return ret;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
+
+ nv20_graph_rdi(dev);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ nv_wr32(dev, 0x40009C , 0x00000040);
+
+ if (dev_priv->chipset >= 0x25) {
+ nv_wr32(dev, 0x400890, 0x00080000);
+ nv_wr32(dev, 0x400610, 0x304B1FB6);
+ nv_wr32(dev, 0x400B80, 0x18B82880);
+ nv_wr32(dev, 0x400B84, 0x44000000);
+ nv_wr32(dev, 0x400098, 0x40000080);
+ nv_wr32(dev, 0x400B88, 0x000000ff);
+ } else {
+ nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+ nv_wr32(dev, 0x400094, 0x00000005);
+ nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+ nv_wr32(dev, 0x400B84, 0x24000000);
+ nv_wr32(dev, 0x400098, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ }
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, 0x00400904 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ nv_wr32(dev, 0x00400908 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ nv_wr32(dev, 0x00400900 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ }
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, 0x100300 + i * 4));
+ }
+ nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
+ tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+ tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
+ nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+
+ /* begin RAM config */
+ vramsz = drm_get_resource_len(dev, 0) - 1;
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400820, 0);
+ nv_wr32(dev, 0x400824, 0);
+ nv_wr32(dev, 0x400864, vramsz - 1);
+ nv_wr32(dev, 0x400868, vramsz - 1);
+
+ /* Interesting... the below overwrites some of the tile setup above. */
+ nv_wr32(dev, 0x400B20, 0x00000000);
+ nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+
+ return 0;
+}
+
+void
+nv20_graph_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
+}
+
+int
+nv30_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
+
+ if (!dev_priv->ctx_table) {
+ /* Create Context Pointer Table */
+ dev_priv->ctx_table_size = 32 * 4;
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table);
+ if (ret)
+ return ret;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(dev, 0x400890, 0x01b463ff);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+ nv_wr32(dev, 0x400B80, 0x1003d888);
+ nv_wr32(dev, 0x400B84, 0x0c000000);
+ nv_wr32(dev, 0x400098, 0x00000000);
+ nv_wr32(dev, 0x40009C, 0x0005ad00);
+ nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+ nv_wr32(dev, 0x4000a0, 0x00000000);
+ nv_wr32(dev, 0x4000a4, 0x00000008);
+ nv_wr32(dev, 0x4008a8, 0xb784a400);
+ nv_wr32(dev, 0x400ba0, 0x002f8685);
+ nv_wr32(dev, 0x400ba4, 0x00231f3f);
+ nv_wr32(dev, 0x4008a4, 0x40000020);
+
+ if (dev_priv->chipset == 0x34) {
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
+ }
+
+ nv_wr32(dev, 0x4000c0, 0x00000016);
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, 0x00400904 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ nv_wr32(dev, 0x00400908 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ nv_wr32(dev, 0x00400900 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(dev, 0x0040075c , 0x00000001);
+
+ /* begin RAM config */
+ /* vramsz = drm_get_resource_len(dev, 0) - 1; */
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ if (dev_priv->chipset != 0x34) {
+ nv_wr32(dev, 0x400750, 0x00EA0000);
+ nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400750, 0x00EA0004);
+ nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
+ }
+
+ return 0;
+}
+
+struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x009e, false, NULL }, /* swzsurf */
+ { 0x0096, false, NULL }, /* celsius */
+ { 0x0097, false, NULL }, /* kelvin (nv20) */
+ { 0x0597, false, NULL }, /* kelvin (nv25) */
+ {}
+};
+
+struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x038a, false, NULL }, /* ifc (nv30) */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x0389, false, NULL }, /* sifm (nv30) */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x0362, false, NULL }, /* surf2d (nv30) */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x039e, false, NULL }, /* swzsurf */
+ { 0x0397, false, NULL }, /* rankine (nv30) */
+ { 0x0497, false, NULL }, /* rankine (nv35) */
+ { 0x0697, false, NULL }, /* rankine (nv34) */
+ {}
+};
+
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 00000000000..ca1d27107a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,62 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fb_bar_size, tmp;
+ int num_tiles;
+ int i;
+
+ /*
+ * This is strictly an NV4x register (don't know about NV5x). The
+ * blob sets these to all kinds of values, and they mess up our
+ * setup. I got the value 0x52802 instead; for some cards the blob
+ * even sets it back to 0x1. Note: the blob doesn't read this value,
+ * so I'm pretty sure this is safe for all cards. Any idea what this
+ * is?
+ */
+ nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
+ nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
+ num_tiles = NV10_PFB_TILE__SIZE;
+ break;
+ case 0x46: /* G72 */
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ case 0x4c: /* C51 (G7X version) */
+ num_tiles = NV40_PFB_TILE__SIZE_1;
+ break;
+ default:
+ num_tiles = NV40_PFB_TILE__SIZE_0;
+ break;
+ }
+
+ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ for (i = 0; i < num_tiles; i++) {
+ nv_wr32(dev, NV10_PFB_TILE(i), 0);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
+ }
+ break;
+ default:
+ for (i = 0; i < num_tiles; i++) {
+ nv_wr32(dev, NV40_PFB_TILE(i), 0);
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv40_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 00000000000..b4f19ccb8b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
+#define NV40_RAMFC__SIZE 128
+
+int
+nv40_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV40_RAMFC(chan->id);
+ int ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
+ NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
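+ /* The RAMFC offsets written here follow the layout restored in
+ * nv40_fifo_do_load_context(): +0/+4 are DMA_PUT/DMA_GET, +12 the
+ * pushbuf DMA instance, +24 DMA_FETCH, +56 the PGRAPH context
+ * instance and +60 the DMA timeslice. */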
+ nv_wi32(dev, fc + 0, chan->pushbuf_base);
+ nv_wi32(dev, fc + 4, chan->pushbuf_base);
+ nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0x30000000 /* no idea.. */);
+ nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
+ nv_wi32(dev, fc + 60, 0x0001FFFF);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /* enable the fifo dma operation */
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ return 0;
+}
+
+void
+nv40_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+ if (chan->ramfc)
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv40_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
+
+ /* No idea what 0x2058 is.. */
+ tmp = nv_ri32(dev, fc + 24);
+ tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
+ tmp2 |= (tmp & 0x30000000);
+ nv_wr32(dev, 0x2058, tmp2);
+ tmp &= ~0x30000000;
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
+ tmp = nv_ri32(dev, fc + 40);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
+ nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
+
+ /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
+ tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
+ tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
+
+ nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
+ /* NVIDIA does this next line twice... */
+ nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
+ nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
+ nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv40_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t tmp;
+
+ nv40_fifo_do_load_context(dev, chan->id);
+
+ /* Set channel active, and in DMA mode */
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+ /* Reset DMA_CTL_AT_INFO to INVALID */
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv40_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ uint32_t fc, tmp;
+ int chid;
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+ fc = NV40_RAMFC(chid);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
+ nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
+ nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
+ nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
+ tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
+ nv_wi32(dev, fc + 24, tmp);
+ nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+ nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+ nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+ tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+ nv_wi32(dev, fc + 40, tmp);
+ nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+ nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
+ /* NVIDIA reads 0x3228 first, then writes DMA_GET here... maybe
+ * something more involved happens depending on the value of 0x3228?
+ */
+ nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
+ nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
+ /* No idea what the below is for exactly; ripped from an mmio trace */
+ nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
+ /* NVIDIA does this next line twice... bug? */
+ nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
+ nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
+ nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
+#if 0 /* no real idea which is PUT/GET in UNK_48.. */
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
+ tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
+ nv_wi32(dev, fc + 72, tmp);
+#endif
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv40_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
+ return 0;
+}
+
+static void
+nv40_fifo_init_reset(struct drm_device *dev)
+{
+ int i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x003224, 0x000f0078);
+ nv_wr32(dev, 0x003210, 0x00000000);
+ nv_wr32(dev, 0x003270, 0x00000000);
+ nv_wr32(dev, 0x003240, 0x00000000);
+ nv_wr32(dev, 0x003244, 0x00000000);
+ nv_wr32(dev, 0x003258, 0x00000000);
+ nv_wr32(dev, 0x002504, 0x00000000);
+ for (i = 0; i < 16; i++)
+ nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
+ nv_wr32(dev, 0x00250c, 0x0000ffff);
+ nv_wr32(dev, 0x002048, 0x00000000);
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x0032e8, 0x00000000);
+ nv_wr32(dev, 0x002410, 0x00000000);
+ nv_wr32(dev, 0x002420, 0x00000000);
+ nv_wr32(dev, 0x002058, 0x00000001);
+ nv_wr32(dev, 0x00221c, 0x00000000);
+ /* something with 0x2084, read/modify/write, no change */
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002500, 0x00000000);
+ nv_wr32(dev, 0x003200, 0x00000000);
+
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
+}
+
+static void
+nv40_fifo_init_ramxx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, 0x2230, 1);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+ break;
+ default:
+ nv_wr32(dev, 0x2230, 0);
+ nv_wr32(dev, NV40_PFIFO_RAMFC,
+ ((nouveau_mem_fb_amount(dev) - 512 * 1024 +
+ dev_priv->ramfc_offset) >> 16) | (3 << 16));
+ break;
+ }
+}
+
+static void
+nv40_fifo_init_intr(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv40_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ int i;
+
+ nv40_fifo_init_reset(dev);
+ nv40_fifo_init_ramxx(dev);
+
+ nv40_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+ nv40_fifo_init_intr(dev);
+ pfifo->enable(dev);
+ pfifo->reassign(dev, true);
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (dev_priv->fifos[i]) {
+ uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 00000000000..d3e0a2a6acf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+MODULE_FIRMWARE("nouveau/nv40.ctxprog");
+MODULE_FIRMWARE("nouveau/nv40.ctxvals");
+MODULE_FIRMWARE("nouveau/nv41.ctxprog");
+MODULE_FIRMWARE("nouveau/nv41.ctxvals");
+MODULE_FIRMWARE("nouveau/nv42.ctxprog");
+MODULE_FIRMWARE("nouveau/nv42.ctxvals");
+MODULE_FIRMWARE("nouveau/nv43.ctxprog");
+MODULE_FIRMWARE("nouveau/nv43.ctxvals");
+MODULE_FIRMWARE("nouveau/nv44.ctxprog");
+MODULE_FIRMWARE("nouveau/nv44.ctxvals");
+MODULE_FIRMWARE("nouveau/nv46.ctxprog");
+MODULE_FIRMWARE("nouveau/nv46.ctxvals");
+MODULE_FIRMWARE("nouveau/nv47.ctxprog");
+MODULE_FIRMWARE("nouveau/nv47.ctxvals");
+MODULE_FIRMWARE("nouveau/nv49.ctxprog");
+MODULE_FIRMWARE("nouveau/nv49.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
+MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
+MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
+
+struct nouveau_channel *
+nv40_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+ return NULL;
+ inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->ramin_grctx &&
+ chan->ramin_grctx->instance == inst)
+ return chan;
+ }
+
+ return NULL;
+}
+
+int
+nv40_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ctx;
+ int ret;
+
+ /* Allocate a 175KiB block of PRAMIN to store the context. This
+ * is massive overkill for a lot of chipsets, but it should be safe
+ * until we're able to implement this properly (which will happen at
+ * more or less the same time we're able to write our own context
+ * programs).
+ */
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
+ if (ret)
+ return ret;
+ ctx = chan->ramin_grctx->gpuobj;
+
+ /* Initialise default context values */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv40_grctx_vals_load(dev, ctx);
+ nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ return 0;
+}
+
+void
+nv40_graph_destroy_context(struct nouveau_channel *chan)
+{
+ nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+}
+
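+/* Ask the context-switch microcode to save or load the channel
+ * context at 'inst', and poll until the transfer completes. */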
+static int
+nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
+{
+ uint32_t old_cp, tv = 1000, tmp;
+ int i;
+
+ old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+
+ tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
+ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
+
+ tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
+ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
+
+ nouveau_wait_for_idle(dev);
+
+ for (i = 0; i < tv; i++) {
+ if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
+ break;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+ if (i == tv) {
+ uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
+ NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
+ NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
+ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
+ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
+ NV_ERROR(dev, "0x40030C = 0x%08x\n",
+ nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/* Restore the context for a specific channel into PGRAPH */
+int
+nv40_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst;
+ int ret;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ ret = nv40_graph_transfer_context(dev, inst, 0);
+ if (ret)
+ return ret;
+
+ /* 0x40032C, no idea of its exact function. Could simply be a
+ * record of the currently active PGRAPH context. It's currently
+ * unknown what bit 24 does. The nv ddx has it set, so we will
+ * set it here too.
+ */
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
+ (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
+ NV40_PGRAPH_CTXCTL_CUR_LOADED);
+ /* 0x32E0 records the instance address of the active FIFO's PGRAPH
+ * context. If at any time this doesn't match 0x40032C, you will
+ * receive PGRAPH_INTR_CONTEXT_SWITCH.
+ */
+ nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+ return 0;
+}
+
+int
+nv40_graph_unload_context(struct drm_device *dev)
+{
+ uint32_t inst;
+ int ret;
+
+ inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+ return 0;
+ inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+ ret = nv40_graph_transfer_context(dev, inst, 1);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+ return ret;
+}
+
+struct nouveau_ctxprog {
+ uint32_t signature;
+ uint8_t version;
+ uint16_t length;
+ uint32_t data[];
+} __attribute__ ((packed));
+
+struct nouveau_ctxvals {
+ uint32_t signature;
+ uint8_t version;
+ uint32_t length;
+ struct {
+ uint32_t offset;
+ uint32_t value;
+ } data[];
+} __attribute__ ((packed));
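+
+/*
+ * The signature fields are checked below against 0x5043564e and
+ * 0x5643564e, i.e. the little-endian ASCII strings "NVCP" and "NVCV".
+ * The length fields count 32-bit opcodes and 8-byte offset/value
+ * pairs respectively, which is what the (fw->size - header size)
+ * checks in nv40_grctx_init() verify.
+ */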
+
+int
+nv40_grctx_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ const int chipset = dev_priv->chipset;
+ const struct firmware *fw;
+ const struct nouveau_ctxprog *cp;
+ const struct nouveau_ctxvals *cv;
+ char name[32];
+ int ret, i;
+
+ pgraph->accel_blocked = true;
+
+ if (!pgraph->ctxprog) {
+ sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
+ return ret;
+ }
+
+ pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxprog) {
+ NV_ERROR(dev, "OOM copying ctxprog\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxprog, fw->data, fw->size);
+
+ cp = pgraph->ctxprog;
+ if (cp->signature != 0x5043564e || cp->version != 0 ||
+ cp->length != ((fw->size - 7) / 4)) {
+ NV_ERROR(dev, "ctxprog invalid\n");
+ release_firmware(fw);
+ nv40_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ if (!pgraph->ctxvals) {
+ sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
+ nv40_grctx_fini(dev);
+ return ret;
+ }
+
+ pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxvals) {
+ NV_ERROR(dev, "OOM copying ctxvals\n");
+ release_firmware(fw);
+ nv40_grctx_fini(dev);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxvals, fw->data, fw->size);
+
+ cv = (void *)pgraph->ctxvals;
+ if (cv->signature != 0x5643564e || cv->version != 0 ||
+ cv->length != ((fw->size - 9) / 8)) {
+ NV_ERROR(dev, "ctxvals invalid\n");
+ release_firmware(fw);
+ nv40_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ cp = pgraph->ctxprog;
+
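+ /* The ucode index register auto-increments as words are written to
+ * the data port, so resetting it to zero and streaming the ctxprog
+ * uploads the whole microcode. */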
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < cp->length; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
+
+ pgraph->accel_blocked = false;
+ return 0;
+}
+
+void
+nv40_grctx_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ if (pgraph->ctxprog) {
+ kfree(pgraph->ctxprog);
+ pgraph->ctxprog = NULL;
+ }
+
+ if (pgraph->ctxvals) {
+ kfree(pgraph->ctxvals);
+ pgraph->ctxvals = NULL;
+ }
+}
+
+void
+nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_ctxvals *cv = pgraph->ctxvals;
+ int i;
+
+ if (!cv)
+ return;
+
+ for (i = 0; i < cv->length; i++)
+ nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
+}
+
+/*
+ * G70 0x47
+ * G71 0x49
+ * NV45 0x48
+ * G72[M] 0x46
+ * G73 0x4b
+ * C51_G7X 0x4c
+ * C51 0x4e
+ */
+int
+nv40_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
+ uint32_t vramsz, tmp;
+ int i, j;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ nv40_grctx_init(dev);
+
+ /* No context present currently */
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
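+ /* 0x1540 appears to hold a unit enable mask; compute the index of
+ * its lowest set bit (ffs-style) and program that into 0x405000. */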
+ j = nv_rd32(dev, 0x1540) & 0xff;
+ if (j) {
+ for (i = 0; !(j & 1); j >>= 1, i++)
+ ;
+ nv_wr32(dev, 0x405000, i);
+ }
+
+ if (dev_priv->chipset == 0x40) {
+ nv_wr32(dev, 0x4009b0, 0x83280fff);
+ nv_wr32(dev, 0x4009b4, 0x000000a0);
+ } else {
+ nv_wr32(dev, 0x400820, 0x83280eff);
+ nv_wr32(dev, 0x400824, 0x000000a0);
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(dev, 0x4009b8, 0x0078e366);
+ nv_wr32(dev, 0x4009bc, 0x0000014c);
+ break;
+ case 0x41:
+ case 0x42: /* pciid also 0x00Cx */
+ /* case 0x0120: XXX (pciid) */
+ nv_wr32(dev, 0x400828, 0x007596ff);
+ nv_wr32(dev, 0x40082c, 0x00000108);
+ break;
+ case 0x43:
+ nv_wr32(dev, 0x400828, 0x0072cb77);
+ nv_wr32(dev, 0x40082c, 0x00000108);
+ break;
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4a:
+ case 0x4c: /* G7x-based C51 */
+ case 0x4e:
+ nv_wr32(dev, 0x400860, 0);
+ nv_wr32(dev, 0x400864, 0);
+ break;
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ nv_wr32(dev, 0x400828, 0x07830610);
+ nv_wr32(dev, 0x40082c, 0x0000016A);
+ break;
+ default:
+ break;
+ }
+
+ nv_wr32(dev, 0x400b38, 0x2ffff800);
+ nv_wr32(dev, 0x400b3c, 0x00006000);
+
+ /* copy tile info from PFB */
+ switch (dev_priv->chipset) {
+ case 0x40: /* vanilla NV40 */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ tmp = nv_rd32(dev, NV10_PFB_TILE(i));
+ nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+ tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
+ nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ case 0x44:
+ case 0x4a:
+ case 0x4e: /* NV44-based cores don't have 0x406900? */
+ for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
+ tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+ nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+ nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+ }
+ break;
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b: /* G7X-based cores */
+ for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
+ tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+ nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+ nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+ nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ default: /* everything else */
+ for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
+ tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+ nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+ nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ }
+
+ /* begin RAM config */
+ vramsz = drm_get_resource_len(dev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400820, 0);
+ nv_wr32(dev, 0x400824, 0);
+ nv_wr32(dev, 0x400864, vramsz);
+ nv_wr32(dev, 0x400868, vramsz);
+ break;
+ default:
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
+ default:
+ nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
+ }
+ nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400840, 0);
+ nv_wr32(dev, 0x400844, 0);
+ nv_wr32(dev, 0x4008A0, vramsz);
+ nv_wr32(dev, 0x4008A4, vramsz);
+ break;
+ }
+
+ return 0;
+}
+
+void nv40_graph_takedown(struct drm_device *dev)
+{
+}
+
+struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x3089, false, NULL }, /* sifm (nv40) */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x3062, false, NULL }, /* surf2d (nv40) */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x309e, false, NULL }, /* swzsurf */
+ { 0x4097, false, NULL }, /* curie (nv40) */
+ { 0x4497, false, NULL }, /* curie (nv44) */
+ {}
+};
+
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 00000000000..2a3495e848e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_mc_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ /* Power up everything, resetting each individual unit will
+ * be done later if needed.
+ */
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
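+ /* On these NV44/C51-family chipsets the PFB_020C value is mirrored
+ * into the PMC_1700 range; the exact purpose is unknown. */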
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4e:
+ case 0x4c: /* C51_G7X */
+ tmp = nv_rd32(dev, NV40_PFB_020C);
+ nv_wr32(dev, NV40_PMC_1700, tmp);
+ nv_wr32(dev, NV40_PMC_1704, 0);
+ nv_wr32(dev, NV40_PMC_1708, 0);
+ nv_wr32(dev, NV40_PMC_170C, tmp);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv40_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
new file mode 100644
index 00000000000..f8e28a1e44e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_mode.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
+
+ NV_DEBUG(crtc->dev, "\n");
+
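+ /* Each hardware LUT entry is 8 bytes: 14-bit R, G and B components
+ * at offsets 0, 2 and 4 (the 16-bit gamma values are shifted down
+ * by 2). */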
+ for (i = 0; i < 256; i++) {
+ writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
+ writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
+ writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
+ }
+
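+ /* 30-bit depth modes appear to expect one extra LUT entry, so
+ * replicate the last one. */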
+ if (nv_crtc->lut.depth == 30) {
+ writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
+ writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
+ writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
+ }
+}
+
+int
+nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int index = nv_crtc->index, ret;
+
+ NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
+
+ if (blanked) {
+ nv_crtc->cursor.hide(nv_crtc, false);
+
+ ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
+ if (ret) {
+ NV_ERROR(dev, "no space while blanking crtc\n");
+ return ret;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
+ OUT_RING(evo, 0);
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+ } else {
+ if (nv_crtc->cursor.visible)
+ nv_crtc->cursor.show(nv_crtc, false);
+ else
+ nv_crtc->cursor.hide(nv_crtc, false);
+
+ ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
+ if (ret) {
+ NV_ERROR(dev, "no space while unblanking crtc\n");
+ return ret;
+ }
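+ /* EVO methods take VRAM addresses in 256-byte units, hence the
+ * >> 8 on the LUT buffer offset below. */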
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ OUT_RING(evo, nv_crtc->lut.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF :
+ NV50_EVO_CRTC_CLUT_MODE_ON);
+ OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
+ PAGE_SHIFT) >> 8);
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ OUT_RING(evo, NvEvoVRAM);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+ OUT_RING(evo, nv_crtc->fb.offset >> 8);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ if (dev_priv->chipset != 0x50) {
+ if (nv_crtc->fb.tile_flags == 0x7a00)
+ OUT_RING(evo, NvEvoFB32);
+ else if (nv_crtc->fb.tile_flags == 0x7000)
+ OUT_RING(evo, NvEvoFB16);
+ else
+ OUT_RING(evo, NvEvoVRAM);
+ } else {
+ OUT_RING(evo, NvEvoVRAM);
+ }
+ }
+
+ nv_crtc->fb.blanked = blanked;
+ return 0;
+}
+
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ if (ret) {
+ NV_ERROR(dev, "no space while setting dither\n");
+ return ret;
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
+ if (on)
+ OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
+ else
+ OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ }
+
+ return 0;
+}
+
+struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+ /* The safest approach is to find an encoder with the right crtc that
+ * is also linked to a connector. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder)
+ if (connector->encoder->crtc == crtc)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+{
+ struct nouveau_connector *nv_connector =
+ nouveau_crtc_connector_get(nv_crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_display_mode *native_mode = NULL;
+ struct drm_display_mode *mode = &nv_crtc->base.mode;
+ uint32_t outX, outY, horiz, vert;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ switch (scaling_mode) {
+ case DRM_MODE_SCALE_NONE:
+ break;
+ default:
+ if (!nv_connector || !nv_connector->native_mode) {
+ NV_ERROR(dev, "No native mode, forcing panel scaling\n");
+ scaling_mode = DRM_MODE_SCALE_NONE;
+ } else {
+ native_mode = nv_connector->native_mode;
+ }
+ break;
+ }
+
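+ /* The aspect-preserving path below works in fixed point with a
+ * 19-bit fractional part; the smaller of the two axis ratios wins
+ * so the source aspect ratio is kept. */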
+ switch (scaling_mode) {
+ case DRM_MODE_SCALE_ASPECT:
+ horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
+ vert = (native_mode->vdisplay << 19) / mode->vdisplay;
+
+ if (vert > horiz) {
+ outX = (mode->hdisplay * horiz) >> 19;
+ outY = (mode->vdisplay * horiz) >> 19;
+ } else {
+ outX = (mode->hdisplay * vert) >> 19;
+ outY = (mode->vdisplay * vert) >> 19;
+ }
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ outX = native_mode->hdisplay;
+ outY = native_mode->vdisplay;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_NONE:
+ default:
+ outX = mode->hdisplay;
+ outY = mode->vdisplay;
+ break;
+ }
+
+ ret = RING_SPACE(evo, update ? 7 : 5);
+ if (ret)
+ return ret;
+
+ /* Got a better name for SCALER_ACTIVE? */
+ /* One day I've got to really figure out why this is needed. */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ mode->hdisplay != outX || mode->vdisplay != outY) {
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
+ } else {
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+ OUT_RING(evo, outY << 16 | outX);
+ OUT_RING(evo, outY << 16 | outX);
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ }
+
+ return 0;
+}
+
+int
+nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
+{
+ uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct nouveau_pll_vals pll;
+ struct pll_lims limits;
+ uint32_t reg1, reg2;
+ int ret;
+
+ ret = get_pll_limits(dev, pll_reg, &limits);
+ if (ret)
+ return ret;
+
+ ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
+ if (ret <= 0)
+ return ret;
+
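+ /* PLLs with a second VCO are programmed as two cascaded N/M stages
+ * plus a log2 post-divider; otherwise a single N1/M1 stage is used. */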
+ if (limits.vco2.maxfreq) {
+ reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
+ nv_wr32(dev, pll_reg, 0x10000611);
+ nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
+ nv_wr32(dev, pll_reg + 8,
+ reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+ } else {
+ reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
+ nv_wr32(dev, pll_reg, 0x50000610);
+ nv_wr32(dev, pll_reg + 4, reg1 |
+ (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+ }
+
+ return 0;
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc;
+
+ if (!crtc)
+ return;
+
+ nv_crtc = nouveau_crtc(crtc);
+ NV_DEBUG(crtc->dev, "\n");
+
+ drm_crtc_cleanup(&nv_crtc->base);
+
+ nv50_cursor_fini(nv_crtc);
+
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ kfree(nv_crtc->mode);
+ kfree(nv_crtc);
+}
+
+int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_bo *cursor = NULL;
+ struct drm_gem_object *gem;
+ int ret = 0, i;
+
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ if (!buffer_handle) {
+ nv_crtc->cursor.hide(nv_crtc, true);
+ return 0;
+ }
+
+ gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+ if (!gem)
+ return -EINVAL;
+ cursor = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(cursor);
+ if (ret)
+ goto out;
+
+ /* The simple approach will do for now. */
+ for (i = 0; i < 64 * 64; i++)
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
+
+ nouveau_bo_unmap(cursor);
+
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
+ dev_priv->vm_vram_base);
+ nv_crtc->cursor.show(nv_crtc, true);
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_pos(nv_crtc, x, y);
+ return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
+ }
+
+ /* We need to know the depth before we upload, but it's possible to
+ * get called before a framebuffer is bound. If this is the case,
+ * mark the lut values as dirty by setting depth==0, and it'll be
+ * uploaded on the first mode_set_base()
+ */
+ if (!nv_crtc->base.fb) {
+ nv_crtc->lut.depth = 0;
+ return;
+ }
+
+ nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_save(struct drm_crtc *crtc)
+{
+ NV_ERROR(crtc->dev, "!!\n");
+}
+
+static void
+nv50_crtc_restore(struct drm_crtc *crtc)
+{
+ NV_ERROR(crtc->dev, "!!\n");
+}
+
+static const struct drm_crtc_funcs nv50_crtc_funcs = {
+ .save = nv50_crtc_save,
+ .restore = nv50_crtc_restore,
+ .cursor_set = nv50_crtc_cursor_set,
+ .cursor_move = nv50_crtc_cursor_move,
+ .gamma_set = nv50_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv50_crtc_destroy,
+};
+
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+ /* Disconnect all unused encoders. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (drm_helper_encoder_in_use(encoder))
+ continue;
+
+ nv_encoder->disconnect(nv_encoder);
+ }
+
+ nv50_crtc_blank(nv_crtc, true);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc *crtc2;
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+ nv50_crtc_blank(nv_crtc, false);
+
+ /* Explicitly blank all unused crtc's. */
+ list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
+ if (!drm_helper_crtc_in_use(crtc2))
+ nv50_crtc_blank(nouveau_crtc(crtc2), true);
+ }
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while committing crtc\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb, bool update)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ int ret, format;
+
+ NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+ switch (drm_fb->depth) {
+ case 8:
+ format = NV50_EVO_CRTC_FB_DEPTH_8;
+ break;
+ case 15:
+ format = NV50_EVO_CRTC_FB_DEPTH_15;
+ break;
+ case 16:
+ format = NV50_EVO_CRTC_FB_DEPTH_16;
+ break;
+ case 24:
+ case 32:
+ format = NV50_EVO_CRTC_FB_DEPTH_24;
+ break;
+ case 30:
+ format = NV50_EVO_CRTC_FB_DEPTH_30;
+ break;
+ default:
+ NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
+ return -EINVAL;
+ }
+
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
+
+ nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+ nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
+ nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
+ if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
+ ret = RING_SPACE(evo, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+ if (nv_crtc->fb.tile_flags == 0x7a00)
+ OUT_RING(evo, NvEvoFB32);
+ else if (nv_crtc->fb.tile_flags == 0x7000)
+ OUT_RING(evo, NvEvoFB16);
+ else
+ OUT_RING(evo, NvEvoVRAM);
+ }
+
+ ret = RING_SPACE(evo, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+ OUT_RING(evo, nv_crtc->fb.offset >> 8);
+ OUT_RING(evo, 0);
+ OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
+ if (!nv_crtc->fb.tile_flags) {
+ OUT_RING(evo, drm_fb->pitch | (1 << 20));
+ } else {
+ OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
+ fb->nvbo->tile_mode);
+ }
+ if (dev_priv->chipset == 0x50)
+ OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
+ else
+ OUT_RING(evo, format);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+ OUT_RING(evo, fb->base.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+ OUT_RING(evo, (y << 16) | x);
+
+ if (nv_crtc->lut.depth != fb->base.depth) {
+ nv_crtc->lut.depth = fb->base.depth;
+ nv50_crtc_lut_load(crtc);
+ }
+
+ if (update) {
+ ret = RING_SPACE(evo, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ }
+
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector = NULL;
+ uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
+ uint32_t hunk1, vunk1, vunk2a, vunk2b;
+ int ret;
+
+ /* Find the connector attached to this CRTC */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+
+ *nv_crtc->mode = *adjusted_mode;
+
+ NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+
+ hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+ vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+ hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
+ vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+ /* I can't give this a proper name, can anyone else? */
+ hunk1 = adjusted_mode->htotal -
+ adjusted_mode->hsync_start + adjusted_mode->hdisplay;
+ vunk1 = adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+ /* Another strange value, this time only for interlaced modes. */
+ vunk2a = 2 * adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+ vunk2b = adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vtotal;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vsync_dur /= 2;
+ vsync_start_to_end /= 2;
+ vunk1 /= 2;
+ vunk2a /= 2;
+ vunk2b /= 2;
+ /* magic */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ vsync_start_to_end -= 1;
+ vunk1 -= 1;
+ vunk2a -= 1;
+ vunk2b -= 1;
+ }
+ }
+
+ ret = RING_SPACE(evo, 17);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
+ OUT_RING(evo, adjusted_mode->clock | 0x800000);
+ OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
+ OUT_RING(evo, 0);
+ OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
+ OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
+ OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
+ (hsync_start_to_end - 1));
+ OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
+ OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
+ } else {
+ OUT_RING(evo, 0);
+ OUT_RING(evo, 0);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
+ OUT_RING(evo, 0);
+
+ /* This is the actual resolution of the mode. */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
+ OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
+
+ nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
+ nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+
+ return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
+ .dpms = nv50_crtc_dpms,
+ .prepare = nv50_crtc_prepare,
+ .commit = nv50_crtc_commit,
+ .mode_fixup = nv50_crtc_mode_fixup,
+ .mode_set = nv50_crtc_mode_set,
+ .mode_set_base = nv50_crtc_mode_set_base,
+ .load_lut = nv50_crtc_lut_load,
+};
+
+int
+nv50_crtc_create(struct drm_device *dev, int index)
+{
+ struct nouveau_crtc *nv_crtc = NULL;
+ int ret, i;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+ if (!nv_crtc)
+ return -ENOMEM;
+
+ nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
+ if (!nv_crtc->mode) {
+ kfree(nv_crtc);
+ return -ENOMEM;
+ }
+
+ /* Default CLUT parameters, will be activated on the hw upon
+ * first mode set.
+ */
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = i << 8;
+ nv_crtc->lut.g[i] = i << 8;
+ nv_crtc->lut.b[i] = i << 8;
+ }
+ nv_crtc->lut.depth = 0;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->lut.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ }
+
+ if (ret) {
+ kfree(nv_crtc->mode);
+ kfree(nv_crtc);
+ return ret;
+ }
+
+ nv_crtc->index = index;
+
+ /* set function pointers */
+ nv_crtc->set_dither = nv50_crtc_set_dither;
+ nv_crtc->set_scale = nv50_crtc_set_scale;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
+ ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ }
+
+ nv50_cursor_init(nv_crtc);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
new file mode 100644
index 00000000000..e2e79a8f220
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_mode.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_device *dev = nv_crtc->base.dev;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ if (update && nv_crtc->cursor.visible)
+ return;
+
+ ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while unhiding cursor\n");
+ return;
+ }
+
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ OUT_RING(evo, NvEvoVRAM);
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
+ OUT_RING(evo, nv_crtc->cursor.offset >> 8);
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ nv_crtc->cursor.visible = true;
+ }
+}
+
+static void
+nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_device *dev = nv_crtc->base.dev;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ if (update && !nv_crtc->cursor.visible)
+ return;
+
+ ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while hiding cursor\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
+ OUT_RING(evo, 0);
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
+ }
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ nv_crtc->cursor.visible = false;
+ }
+}
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
+ ((y & 0xFFFF) << 16) | (x & 0xFFFF));
+ /* Needed to make the cursor move. */
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+ NV_DEBUG(nv_crtc->base.dev, "\n");
+ if (offset == nv_crtc->cursor.offset)
+ return;
+
+ nv_crtc->cursor.offset = offset;
+ if (nv_crtc->cursor.visible) {
+ nv_crtc->cursor.visible = false;
+ nv_crtc->cursor.show(nv_crtc, true);
+ }
+}
+
+int
+nv50_cursor_init(struct nouveau_crtc *nv_crtc)
+{
+ nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
+ nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
+ nv_crtc->cursor.hide = nv50_cursor_hide;
+ nv_crtc->cursor.show = nv50_cursor_show;
+ return 0;
+}
+
+void
+nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ int idx = nv_crtc->index;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
+ if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+ NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+ NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
new file mode 100644
index 00000000000..fb5838e3be2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
+{
+ struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int ret;
+
+ NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while disconnecting DAC\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+ OUT_RING(evo, 0);
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ enum drm_connector_status status = connector_status_disconnected;
+ uint32_t dpms_state, load_pattern, load_state;
+ int or = nv_encoder->or;
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
+ dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+ if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
+ NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+ NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+ return status;
+ }
+
+ /* Use bios provided value if possible. */
+ if (dev_priv->vbios->dactestval) {
+ load_pattern = dev_priv->vbios->dactestval;
+ NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
+ load_pattern);
+ } else {
+ load_pattern = 340;
+ NV_DEBUG(dev, "Using default load_pattern of %d\n",
+ load_pattern);
+ }
+
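+ /* Drive the test pattern onto the DAC, give the sense circuitry
+ * time to settle, then check which load-present bits came back. */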
+ nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
+ NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
+ mdelay(45); /* give it some time to process */
+ load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+
+ if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
+ NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
+ status = connector_status_connected;
+
+ if (status == connector_status_connected)
+ NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
+ else
+ NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);
+
+ return status;
+}
+
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t val;
+ int or = nv_encoder->or;
+
+ NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+
+ /* wait for it to be done */
+ if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
+ NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+ NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+ return;
+ }
+
+ val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_STANDBY:
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
+ break;
+ default:
+ break;
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+}
+
+static void
+nv50_dac_save(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static void
+nv50_dac_restore(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *connector;
+
+ NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+
+ connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!connector) {
+ NV_ERROR(encoder->dev, "Encoder has no connector\n");
+ return false;
+ }
+
+ if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+ connector->native_mode) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+
+ return true;
+}
+
+static void
+nv50_dac_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+ uint32_t mode_ctl = 0, mode_ctl2 = 0;
+ int ret;
+
+ NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+
+ nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (crtc->index == 1)
+ mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
+ else
+ mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
+
+ /* Lacking a working tv-out, this is not 100% certain. */
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+ mode_ctl |= 0x40;
+ else if (nv_encoder->dcb->type == OUTPUT_TV)
+ mode_ctl |= 0x100;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
+
+ ret = RING_SPACE(evo, 3);
+ if (ret) {
+ NV_ERROR(dev, "no space while connecting DAC\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+ OUT_RING(evo, mode_ctl);
+ OUT_RING(evo, mode_ctl2);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
+ .dpms = nv50_dac_dpms,
+ .save = nv50_dac_save,
+ .restore = nv50_dac_restore,
+ .mode_fixup = nv50_dac_mode_fixup,
+ .prepare = nv50_dac_prepare,
+ .commit = nv50_dac_commit,
+ .mode_set = nv50_dac_mode_set,
+ .detect = nv50_dac_detect
+};
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (!encoder)
+ return;
+
+ NV_DEBUG(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
+ .destroy = nv50_dac_destroy,
+};
+
+int
+nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ NV_DEBUG(dev, "\n");
+ NV_INFO(dev, "Detected a DAC output\n");
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ nv_encoder->disconnect = nv50_dac_disconnect;
+
+ drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 00000000000..12c5ee63495
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "nv50_display.h"
+#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_fb.h"
+#include "drm_crtc_helper.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan = *pchan;
+
+ if (!chan)
+ return;
+ *pchan = NULL;
+
+ nouveau_gpuobj_channel_takedown(chan);
+ nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+
+ if (chan->user)
+ iounmap(chan->user);
+
+ kfree(chan);
+}
+
+static int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
+ uint32_t tile_flags, uint32_t magic_flags,
+ uint32_t offset, uint32_t limit)
+{
+ struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+ struct drm_device *dev = evo->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+ ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &obj);
+ return ret;
+ }
+
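+ /* The DMA object is six dwords: class and flags, limit and base
+ * offset, then what looks like padding/attribute words. */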
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(dev, obj, 1, limit);
+ nv_wo32(dev, obj, 2, offset);
+ nv_wo32(dev, obj, 3, 0x00000000);
+ nv_wo32(dev, obj, 4, 0x00000000);
+ nv_wo32(dev, obj, 5, 0x00010000);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ *pchan = chan;
+
+ chan->id = -1;
+ chan->dev = dev;
+ chan->user_get = 4;
+ chan->user_put = 0;
+
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
+ im_pramin->start, 32768);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
+ 0, &chan->ramht);
+ if (ret) {
+ NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ if (dev_priv->chipset != 0x50) {
+ ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
+ 0, 0xffffffff);
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+
+ ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
+ 0, 0xffffffff);
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+ }
+
+ ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, nouveau_mem_fb_amount(dev));
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ false, true, &chan->pushbuf_bo);
+ if (ret == 0)
+ ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_bo_map(chan->pushbuf_bo);
+ if (ret) {
+ NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_PDISPLAY_USER(0), PAGE_SIZE);
+ if (!chan->user) {
+ NV_ERROR(dev, "Error mapping EVO control regs.\n");
+ nv50_evo_channel_del(pchan);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_connector *connector;
+ uint32_t val, ram_amount, hpd_en[2];
+ uint64_t start;
+ int ret, i;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+ /*
+ * I think the 0x006101XX range is some kind of main control area
+ * that enables things.
+ */
+ /* CRTC? */
+ for (i = 0; i < 2; i++) {
+ val = nv_rd32(dev, 0x00616100 + (i * 0x800));
+ nv_wr32(dev, 0x00610190 + (i * 0x10), val);
+ val = nv_rd32(dev, 0x00616104 + (i * 0x800));
+ nv_wr32(dev, 0x00610194 + (i * 0x10), val);
+ val = nv_rd32(dev, 0x00616108 + (i * 0x800));
+ nv_wr32(dev, 0x00610198 + (i * 0x10), val);
+ val = nv_rd32(dev, 0x0061610c + (i * 0x800));
+ nv_wr32(dev, 0x0061019c + (i * 0x10), val);
+ }
+ /* DAC */
+ for (i = 0; i < 3; i++) {
+ val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
+ nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
+ }
+ /* SOR */
+ for (i = 0; i < 4; i++) {
+ val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
+ nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
+ }
+ /* Something not yet in use, tv-out maybe. */
+ for (i = 0; i < 3; i++) {
+ val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
+ nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
+ }
+
+ for (i = 0; i < 3; i++) {
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+ nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
+ }
+
+ /* This used to be in crtc unblank, but seems out of place there. */
+ nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
+ /* RAM is clamped to 256 MiB. */
+ ram_amount = nouveau_mem_fb_amount(dev);
+ NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
+ if (ram_amount > 256*1024*1024)
+ ram_amount = 256*1024*1024;
+ nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
+ nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
+ nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
+
+ /* The precise purpose is unknown; I suspect it has something to do
+ * with text mode.
+ */
+ if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
+ nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
+ if (!nv_wait(0x006194e8, 2, 0)) {
+ NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
+ NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
+ nv_rd32(dev, 0x6194e8));
+ return -EBUSY;
+ }
+ }
+
+ /* taken from nv bug #12637, attempts to un-wedge the hw if it's
+ * stuck in some unspecified state
+ */
+ start = ptimer->read(dev);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
+ while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
+ if ((val & 0x9f0000) == 0x20000)
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ val | 0x800000);
+
+ if ((val & 0x3f0000) == 0x30000)
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ val | 0x200000);
+
+ if (ptimer->read(dev) - start > 1000000000ULL) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
+ if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 2; i++) {
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
+ if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+ NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+ NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
+ if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
+ NV_ERROR(dev, "timeout: "
+ "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
+ NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+
+ /* initialise fifo */
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
+ ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+ NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
+ NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
+ if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
+ return -EBUSY;
+ }
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
+ NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
+ nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
+ NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
+ nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
+
+ evo->dma.max = (4096/4) - 2;
+ evo->dma.put = 0;
+ evo->dma.cur = evo->dma.put;
+ evo->dma.free = evo->dma.max - evo->dma.cur;
+
+ ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(evo, 0);
+
+ ret = RING_SPACE(evo, 11);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+ OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+ OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+ NV_ERROR(dev, "evo pushbuf stalled\n");
+
+ /* enable clock change interrupts. */
+ nv_wr32(dev, 0x610028, 0x00010001);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_EN_CLK_UNK40));
+
+ /* enable hotplug interrupts */
+ hpd_en[0] = hpd_en[1] = 0;
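+ /* Each enable register covers 16 GPIO lines; the 0x00010001 pattern
+ * sets a line's bit in both half-words, presumably one per signal
+ * edge. */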
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ struct dcb_gpio_entry *gpio;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
+ if (!gpio)
+ continue;
+
+ hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
+ }
+
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ nv_wr32(dev, 0xe050, hpd_en[0]);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ nv_wr32(dev, 0xe070, hpd_en[1]);
+ }
+
+ return 0;
+}
+
+static int nv50_display_disable(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_crtc *drm_crtc;
+ int ret, i;
+
+ NV_DEBUG(dev, "\n");
+
+ list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+
+ nv50_crtc_blank(crtc, true);
+ }
+
+ ret = RING_SPACE(dev_priv->evo, 2);
+ if (ret == 0) {
+ BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(dev_priv->evo, 0);
+ }
+ FIRE_RING(dev_priv->evo);
+
+ /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
+ * cleaning up?
+ */
+ list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+ uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+
+ if (!crtc->base.enabled)
+ continue;
+
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
+ if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+ NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
+ "0x%08x\n", mask, mask);
+ NV_ERROR(dev, "0x610024 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_INTR_1));
+ }
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
+ nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
+ if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+ NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
+ NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
+ NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
+ nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
+ }
+ }
+
+ /* disable interrupts. */
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+
+ /* disable hotplug interrupts */
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ nv_wr32(dev, 0xe070, 0x00000000);
+ }
+ return 0;
+}
+
+int nv50_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ uint32_t connector[16] = {};
+ int ret, i;
+
+ NV_DEBUG(dev, "\n");
+
+ /* init basic kernel modesetting */
+ drm_mode_config_init(dev);
+
+ /* Initialise some optional connector properties. */
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+
+ dev->mode_config.fb_base = dev_priv->fb_phys;
+
+ /* Create EVO channel */
+ ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
+ return ret;
+ }
+
+ /* Create CRTC objects */
+ for (i = 0; i < 2; i++)
+ nv50_crtc_create(dev, i);
+
+ /* We setup the encoders from the BIOS table */
+	for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *entry = &dcb->entry[i];
+
+ if (entry->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
+ entry->type, ffs(entry->or) - 1);
+ continue;
+ }
+
+ switch (entry->type) {
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ case OUTPUT_DP:
+ nv50_sor_create(dev, entry);
+ break;
+ case OUTPUT_ANALOG:
+ nv50_dac_create(dev, entry);
+ break;
+ default:
+ NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
+ continue;
+ }
+
+ connector[entry->connector] |= (1 << entry->type);
+ }
+
+ /* It appears that DCB 3.0+ VBIOS has a connector table, however,
+ * I'm not 100% certain how to decode it correctly yet so just
+ * look at what encoders are present on each connector index and
+ * attempt to derive the connector type from that.
+ */
+	for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *entry = &dcb->entry[i];
+ uint16_t encoders;
+ int type;
+
+ encoders = connector[entry->connector];
+ if (!(encoders & (1 << entry->type)))
+ continue;
+ connector[entry->connector] = 0;
+
+ if (encoders & (1 << OUTPUT_DP)) {
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ type = DRM_MODE_CONNECTOR_DVII;
+ else
+ type = DRM_MODE_CONNECTOR_DVID;
+ } else if (encoders & (1 << OUTPUT_ANALOG)) {
+ type = DRM_MODE_CONNECTOR_VGA;
+ } else if (encoders & (1 << OUTPUT_LVDS)) {
+ type = DRM_MODE_CONNECTOR_LVDS;
+ } else {
+ type = DRM_MODE_CONNECTOR_Unknown;
+ }
+
+ if (type == DRM_MODE_CONNECTOR_Unknown)
+ continue;
+
+ nouveau_connector_create(dev, entry->connector, type);
+ }
+
+ ret = nv50_display_init(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int nv50_display_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "\n");
+
+ drm_mode_config_cleanup(dev);
+
+ nv50_display_disable(dev);
+ nv50_evo_channel_del(&dev_priv->evo);
+
+ return 0;
+}
+
+static inline uint32_t
+nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t mc;
+
+ if (sor) {
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
+ else
+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
+ } else {
+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
+ }
+
+ return mc;
+}
+
+static int
+nv50_display_irq_head(struct drm_device *dev, int *phead,
+ struct dcb_entry **pdcbent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
+ int head, i, or = 0, type = OUTPUT_ANY;
+
+ /* We're assuming that head 0 *or* head 1 will be active here,
+ * and not both. I'm not sure if the hw will even signal both
+ * ever, but it definitely shouldn't for us as we commit each
+ * CRTC separately, and submission will be blocked by the GPU
+ * until we handle each in turn.
+ */
+ NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
+ head = ffs((unk30 >> 9) & 3) - 1;
+ if (head < 0)
+ return -EINVAL;
+
+ /* This assumes CRTCs are never bound to multiple encoders, which
+ * should be the case.
+ */
+ for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
+ uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
+ if (!(mc & (1 << head)))
+ continue;
+
+ switch ((mc >> 8) & 0xf) {
+ case 0: type = OUTPUT_ANALOG; break;
+ case 1: type = OUTPUT_TV; break;
+ default:
+			NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", mc);
+			return -EINVAL;
+ }
+
+ or = i;
+ }
+
+ for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
+ uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
+ if (!(mc & (1 << head)))
+ continue;
+
+ switch ((mc >> 8) & 0xf) {
+ case 0: type = OUTPUT_LVDS; break;
+ case 1: type = OUTPUT_TMDS; break;
+ case 2: type = OUTPUT_TMDS; break;
+ case 5: type = OUTPUT_TMDS; break;
+ case 8: type = OUTPUT_DP; break;
+ case 9: type = OUTPUT_DP; break;
+ default:
+			NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", mc);
+			return -EINVAL;
+ }
+
+ or = i;
+ }
+
+ NV_DEBUG(dev, "type %d, or %d\n", type, or);
+ if (type == OUTPUT_ANY) {
+		NV_ERROR(dev, "unknown encoder\n");
+		return -EINVAL;
+ }
+
+ for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
+ struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+
+ if (dcbent->type != type)
+ continue;
+
+ if (!(dcbent->or & (1 << or)))
+ continue;
+
+ *phead = head;
+ *pdcbent = dcbent;
+ return 0;
+ }
+
+	NV_ERROR(dev, "no DCB entry for type %d, or %d\n", type, or);
+	return -EINVAL;
+}
+
+static uint32_t
+nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
+ int pxclk)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint32_t mc, script = 0, or;
+
+ or = ffs(dcbent->or) - 1;
+ mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
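+	/* The low nibble of mode_ctrl selects the base script; bit 8 appears
+	 * to mean dual-link and, for LVDS panels, bit 9 a 24-bit interface.
+	 */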
+ switch (dcbent->type) {
+ case OUTPUT_LVDS:
+ script = (mc >> 8) & 0xf;
+ if (bios->pub.fp_no_ddc) {
+ if (bios->fp.dual_link)
+ script |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ script |= 0x0200;
+ } else {
+ if (pxclk >= bios->fp.duallink_transition_clk) {
+ script |= 0x0100;
+ if (bios->fp.strapless_is_24bit & 2)
+ script |= 0x0200;
+			} else if (bios->fp.strapless_is_24bit & 1)
+ script |= 0x0200;
+ }
+
+ if (nouveau_uscript_lvds >= 0) {
+ NV_INFO(dev, "override script 0x%04x with 0x%04x "
+ "for output LVDS-%d\n", script,
+ nouveau_uscript_lvds, or);
+ script = nouveau_uscript_lvds;
+ }
+ break;
+ case OUTPUT_TMDS:
+ script = (mc >> 8) & 0xf;
+ if (pxclk >= 165000)
+ script |= 0x0100;
+
+ if (nouveau_uscript_tmds >= 0) {
+ NV_INFO(dev, "override script 0x%04x with 0x%04x "
+ "for output TMDS-%d\n", script,
+ nouveau_uscript_tmds, or);
+ script = nouveau_uscript_tmds;
+ }
+ break;
+ case OUTPUT_DP:
+ script = (mc >> 8) & 0xf;
+ break;
+ case OUTPUT_ANALOG:
+ script = 0xff;
+ break;
+ default:
+ NV_ERROR(dev, "modeset on unsupported output type!\n");
+ break;
+ }
+
+ return script;
+}
+
+static void
+nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ struct list_head *entry, *tmp;
+
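+	/* Release every channel waiting on a vblank semaphore by writing its
+	 * requested value into the notifier. Note the CRTC argument isn't
+	 * consulted: all waiters are woken whichever CRTC fired.
+	 */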
+ list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
+ chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+
+ nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
+ chan->nvsw.vblsem_rval);
+ list_del(&chan->nvsw.vbl_wait);
+ }
+}
+
+static void
+nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
+{
+ intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+
+ if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
+ nv50_display_vblank_crtc_handler(dev, 0);
+
+ if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
+ nv50_display_vblank_crtc_handler(dev, 1);
+
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
+ NV50_PDISPLAY_INTR_EN) & ~intr);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+}
+
+static void
+nv50_display_unk10_handler(struct drm_device *dev)
+{
+ struct dcb_entry *dcbent;
+ int head, ret;
+
+ ret = nv50_display_irq_head(dev, &head, &dcbent);
+ if (ret)
+ goto ack;
+
+ nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
+
+ nouveau_bios_run_display_table(dev, dcbent, 0, -1);
+
+ack:
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
+ nv_wr32(dev, 0x610030, 0x80000000);
+}
+
+static void
+nv50_display_unk20_handler(struct drm_device *dev)
+{
+ struct dcb_entry *dcbent;
+ uint32_t tmp, pclk, script;
+ int head, or, ret;
+
+ ret = nv50_display_irq_head(dev, &head, &dcbent);
+ if (ret)
+ goto ack;
+ or = ffs(dcbent->or) - 1;
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
+ script = nv50_display_script_select(dev, dcbent, pclk);
+
+ NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);
+
+ if (dcbent->type != OUTPUT_DP)
+ nouveau_bios_run_display_table(dev, dcbent, 0, -2);
+
+ nv50_crtc_set_clock(dev, head, pclk);
+
+ nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
+	tmp &= ~0x0000000f;
+ nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
+
+ if (dcbent->type != OUTPUT_ANALOG) {
+ tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
+ tmp &= ~0x00000f0f;
+ if (script & 0x0100)
+ tmp |= 0x00000101;
+ nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
+ } else {
+ nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+ }
+
+ack:
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
+ nv_wr32(dev, 0x610030, 0x80000000);
+}
+
+static void
+nv50_display_unk40_handler(struct drm_device *dev)
+{
+ struct dcb_entry *dcbent;
+ int head, pclk, script, ret;
+
+ ret = nv50_display_irq_head(dev, &head, &dcbent);
+ if (ret)
+ goto ack;
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
+ script = nv50_display_script_select(dev, dcbent, pclk);
+
+ nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
+
+ack:
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
+ nv_wr32(dev, 0x610030, 0x80000000);
+ nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
+}
+
+void
+nv50_display_irq_handler_bh(struct work_struct *work)
+{
+ struct drm_nouveau_private *dev_priv =
+ container_of(work, struct drm_nouveau_private, irq_work);
+ struct drm_device *dev = dev_priv->dev;
+
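+	/* PMC interrupts were masked in the hard IRQ handler before this
+	 * work was queued; handle each pending clock-change stage in order,
+	 * then re-enable them below.
+	 */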
+ for (;;) {
+ uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
+ uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+
+ NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+
+ if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
+ nv50_display_unk10_handler(dev);
+		else if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
+			nv50_display_unk20_handler(dev);
+		else if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
+			nv50_display_unk40_handler(dev);
+ else
+ break;
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
+}
+
+static void
+nv50_display_error_handler(struct drm_device *dev)
+{
+ uint32_t addr, data;
+
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
+ addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
+ data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
+
+ NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
+ 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+
+ nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
+}
+
+static void
+nv50_display_irq_hotplug(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+ uint32_t unplug_mask, plug_mask, change_mask;
+ uint32_t hpd0, hpd1 = 0;
+
+ hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+ plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
+ unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
+ change_mask = plug_mask | unplug_mask;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_encoder_helper_funcs *helper;
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder;
+ struct dcb_gpio_entry *gpio;
+ uint32_t reg;
+ bool plugged;
+
+ if (!nv_connector->dcb)
+ continue;
+
+ gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
+ if (!gpio || !(change_mask & (1 << gpio->line)))
+ continue;
+
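+		/* Each GPIO state register packs eight lines at four bits
+		 * apiece; bit 2 of a line's nibble appears to reflect
+		 * connector presence.
+		 */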
+ reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
+ plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
+ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+			drm_get_connector_name(connector));
+
+ if (!connector->encoder || !connector->encoder->crtc ||
+ !connector->encoder->crtc->enabled)
+ continue;
+ nv_encoder = nouveau_encoder(connector->encoder);
+ helper = connector->encoder->helper_private;
+
+ if (nv_encoder->dcb->type != OUTPUT_DP)
+ continue;
+
+ if (plugged)
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+ else
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ }
+
+ nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+}
+
+void
+nv50_display_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t delayed = 0;
+
+ while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
+ nv50_display_irq_hotplug(dev);
+
+ while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
+ uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
+ uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+ uint32_t clock;
+
+ NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+
+ if (!intr0 && !(intr1 & ~delayed))
+ break;
+
+ if (intr0 & 0x00010000) {
+ nv50_display_error_handler(dev);
+ intr0 &= ~0x00010000;
+ }
+
+ if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
+ nv50_display_vblank_handler(dev, intr1);
+ intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+ }
+
+ clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_1_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_1_CLK_UNK40));
+ if (clock) {
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+ if (!work_pending(&dev_priv->irq_work))
+ queue_work(dev_priv->wq, &dev_priv->irq_work);
+ delayed |= clock;
+ intr1 &= ~clock;
+ }
+
+ if (intr0) {
+ NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
+ }
+
+ if (intr1) {
+ NV_ERROR(dev,
+ "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 00000000000..3ae8d0725f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV50_DISPLAY_H__
+#define __NV50_DISPLAY_H__
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_reg.h"
+#include "nouveau_crtc.h"
+#include "nv50_evo.h"
+
+void nv50_display_irq_handler(struct drm_device *dev);
+void nv50_display_irq_handler_bh(struct work_struct *work);
+int nv50_display_init(struct drm_device *dev);
+int nv50_display_create(struct drm_device *dev);
+int nv50_display_destroy(struct drm_device *dev);
+int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
+int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+
+#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
new file mode 100644
index 00000000000..aae13343bce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define NV50_EVO_UPDATE 0x00000080
+#define NV50_EVO_UNK84 0x00000084
+#define NV50_EVO_UNK84_NOTIFY 0x40000000
+#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
+#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
+#define NV50_EVO_DMA_NOTIFY 0x00000088
+#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
+#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
+#define NV50_EVO_UNK8C 0x0000008C
+
+#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
+#define NV50_EVO_DAC_MODE_CTRL 0x00000400
+#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
+#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
+#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
+#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
+#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
+
+#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
+#define NV50_EVO_SOR_MODE_CTRL 0x00000600
+#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
+#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
+#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
+#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
+#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
+#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
+
+#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
+#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
+#define NV50_EVO_CRTC_UNK0800 0x00000800
+#define NV50_EVO_CRTC_CLOCK 0x00000804
+#define NV50_EVO_CRTC_INTERLACE 0x00000808
+#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
+#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
+#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
+#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
+#define NV50_EVO_CRTC_UNK0820 0x00000820
+#define NV50_EVO_CRTC_UNK0824 0x00000824
+#define NV50_EVO_CRTC_UNK082C 0x0000082c
+#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
+/* You can't have a palette in 8-bit mode (=OFF) */
+#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
+#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
+#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
+#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
+#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
+#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
+#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
+#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
+#define NV50_EVO_CRTC_FB_SIZE 0x00000868
+#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
+#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
+#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
+#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
+#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
+#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
+#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
+#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
+#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
+#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
+#define NV50_EVO_CRTC_FB_DMA 0x00000874
+#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
+#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
+#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
+#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
+#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
+#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
+#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
+#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
+#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
+#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
+#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
+#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
+#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
+#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
+#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
+#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
+#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000
+#define NV50_EVO_CRTC_FB_POS 0x000008c0
+#define NV50_EVO_CRTC_REAL_RES 0x000008c8
+#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
+#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
+	((((unsigned)(y) << 16) & 0xFFFF0000) | ((unsigned)(x) & 0x0000FFFF))
+/* Both of these are needed, otherwise nothing happens. */
+#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
+#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 00000000000..6bcc6d39e9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,273 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+static void
+nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_fillrect(info, rect);
+ return;
+ }
+
+ if (rect->rop != ROP_COPY) {
+ BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ OUT_RING(chan, 1);
+ }
+ BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+ OUT_RING(chan, rect->color);
+ BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+ OUT_RING(chan, rect->dx);
+ OUT_RING(chan, rect->dy);
+ OUT_RING(chan, rect->dx + rect->width);
+ OUT_RING(chan, rect->dy + rect->height);
+ if (rect->rop != ROP_COPY) {
+ BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ OUT_RING(chan, 3);
+ }
+ FIRE_RING(chan);
+}
+
+static void
+nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_copyarea(info, region);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+ OUT_RING(chan, region->dx);
+ OUT_RING(chan, region->dy);
+ OUT_RING(chan, region->width);
+ OUT_RING(chan, region->height);
+ BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, region->sx);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, region->sy);
+ FIRE_RING(chan);
+}
+
+static void
+nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0U >> (32 - info->var.bits_per_pixel));
+ uint32_t *palette = info->pseudo_palette;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (image->depth != 1) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ width = (image->width + 31) & ~31;
+ dwords = (width * image->height) >> 5;
+
+ BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ OUT_RING(chan, palette[image->bg_color] | mask);
+ OUT_RING(chan, palette[image->fg_color] | mask);
+ } else {
+ OUT_RING(chan, image->bg_color);
+ OUT_RING(chan, image->fg_color);
+ }
+ BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+ OUT_RING(chan, image->width);
+ OUT_RING(chan, image->height);
+ BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, image->dx);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, image->dy);
+
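+	/* A single BEGIN_RING packet can carry at most 2047 dwords of
+	 * method data, so push the expanded bitmap in chunks.
+	 */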
+ while (dwords) {
+ int push = dwords > 2047 ? 2047 : dwords;
+
+ if (RING_SPACE(chan, push + 1)) {
+ NV_ERROR(dev,
+ "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ dwords -= push;
+
+ BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+ OUT_RINGp(chan, data, push);
+ data += push;
+ }
+
+ FIRE_RING(chan);
+}
+
+int
+nv50_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_gpuobj *eng2d = NULL;
+ int ret, format;
+
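+	/* Pick a 2D surface format matching the fbcon depth; the magic
+	 * values below presumably mirror the format enumeration of the
+	 * 0x502d object class.
+	 */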
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = 0xf3;
+ break;
+ case 15:
+ format = 0xf8;
+ break;
+ case 16:
+ format = 0xe8;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32, just use 24.. */
+ format = 0xe6;
+ break;
+ case 2: /* depth 30 */
+ format = 0xd1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 59);
+ if (ret) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ return ret;
+ }
+
+ BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+ OUT_RING(chan, Nv2D);
+ BEGIN_RING(chan, NvSub2D, 0x0180, 4);
+ OUT_RING(chan, NvNotify0);
+ OUT_RING(chan, chan->vram_handle);
+ OUT_RING(chan, chan->vram_handle);
+ OUT_RING(chan, chan->vram_handle);
+ BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ OUT_RING(chan, 3);
+ BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+ OUT_RING(chan, 0x55);
+ BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+ OUT_RING(chan, 4);
+ OUT_RING(chan, format);
+ BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+ OUT_RING(chan, 2);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+ OUT_RING(chan, format);
+ BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+ OUT_RING(chan, format);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+ OUT_RING(chan, info->fix.line_length);
+ OUT_RING(chan, info->var.xres_virtual);
+ OUT_RING(chan, info->var.yres_virtual);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
+ dev_priv->vm_vram_base);
+ BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+ OUT_RING(chan, format);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+ OUT_RING(chan, info->fix.line_length);
+ OUT_RING(chan, info->var.xres_virtual);
+ OUT_RING(chan, info->var.yres_virtual);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
+ dev_priv->vm_vram_base);
+
+ info->fbops->fb_fillrect = nv50_fbcon_fillrect;
+ info->fbops->fb_copyarea = nv50_fbcon_copyarea;
+ info->fbops->fb_imageblit = nv50_fbcon_imageblit;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 00000000000..77ae1aaa0bc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_fifo_priv {
+ struct nouveau_gpuobj_ref *thingo[2];
+ int cur_thingo;
+};
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_fifo_init_thingo(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+ struct nouveau_gpuobj_ref *cur;
+ int i, nr;
+
+ NV_DEBUG(dev, "\n");
+
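+	/* The runlist ("thingo") is double-buffered, so a new list can be
+	 * written while the hardware may still be reading the other one.
+	 */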
+ cur = priv->thingo[priv->cur_thingo];
+ priv->cur_thingo = !priv->cur_thingo;
+
+ /* We never schedule channel 0 or 127 */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 1, nr = 0; i < 127; i++) {
+ if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
+ nv_wo32(dev, cur->gpuobj, nr++, i);
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x32f4, cur->instance >> 12);
+ nv_wr32(dev, 0x32ec, nr);
+ nv_wr32(dev, 0x2500, 0x101);
+}
+
+static int
+nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[channel];
+ uint32_t inst;
+
+ NV_DEBUG(dev, "ch%d\n", channel);
+
+ if (!chan->ramfc)
+ return -EINVAL;
+
+ if (IS_G80)
+ inst = chan->ramfc->instance >> 12;
+ else
+ inst = chan->ramfc->instance >> 8;
+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
+ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+
+ if (!nt)
+ nv50_fifo_init_thingo(dev);
+ return 0;
+}
+
+static void
+nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+
+ NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
+
+ if (IS_G80)
+ inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
+ else
+ inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
+
+ if (!nt)
+ nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_reset(struct drm_device *dev)
+{
+ uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_fifo_init_intr(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+}
+
+static void
+nv50_fifo_init_context_table(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
+ if (dev_priv->fifos[i])
+ nv50_fifo_channel_enable(dev, i, true);
+ else
+ nv50_fifo_channel_disable(dev, i, true);
+ }
+
+ nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_regs__nv(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x250c, 0x6f3cfc34);
+}
+
+static void
+nv50_fifo_init_regs(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x2500, 0);
+ nv_wr32(dev, 0x3250, 0);
+ nv_wr32(dev, 0x3220, 0);
+ nv_wr32(dev, 0x3204, 0);
+ nv_wr32(dev, 0x3210, 0);
+ nv_wr32(dev, 0x3270, 0);
+
+	/* Enable the dummy channels set up by nv50_instmem.c */
+ nv50_fifo_channel_enable(dev, 0, true);
+ nv50_fifo_channel_enable(dev, 127, true);
+}
+
+int
+nv50_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ priv = dev_priv->engine.fifo.priv;
+ if (priv) {
+ priv->cur_thingo = !priv->cur_thingo;
+ goto just_reset;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.fifo.priv = priv;
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
+ if (ret) {
+ NV_ERROR(dev, "error creating thingo0: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
+ if (ret) {
+ NV_ERROR(dev, "error creating thingo1: %d\n", ret);
+ return ret;
+ }
+
+just_reset:
+ nv50_fifo_init_reset(dev);
+ nv50_fifo_init_intr(dev);
+ nv50_fifo_init_context_table(dev);
+ nv50_fifo_init_regs__nv(dev);
+ nv50_fifo_init_regs(dev);
+ dev_priv->engine.fifo.enable(dev);
+ dev_priv->engine.fifo.reassign(dev, true);
+
+ return 0;
+}
+
+void
+nv50_fifo_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!priv)
+ return;
+
+ nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
+ nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
+
+ dev_priv->engine.fifo.priv = NULL;
+ kfree(priv);
+}
+
+int
+nv50_fifo_channel_id(struct drm_device *dev)
+{
+ return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv50_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = NULL;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ if (IS_G80) {
+ uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
+ uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
+
+ ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
+ 0x100, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ramfc,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
+ ramin_voffset + 0x0400, 4096,
+ 0, NULL, &chan->cache);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+ ramfc = chan->ramfc->gpuobj;
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+ 0, &chan->cache);
+ if (ret)
+ return ret;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+
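+	/* Initial RAMFC state: 0x08/0x10 appear to be the pushbuf DMA put/
+	 * get offsets, 0x48 the pushbuf DMA object and 0x80 the RAMHT; the
+	 * remaining magic values are likely lifted from mmio traces of the
+	 * binary driver.
+	 */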
+ nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
+ nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
+ nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
+ nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+ nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
+ nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
+ nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
+ nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
+ nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
+ nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
+ nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+
+ if (!IS_G80) {
+ nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
+ nv_wo32(dev, chan->ramin->gpuobj, 1,
+ chan->ramfc->instance >> 8);
+
+ nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
+ nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+ }
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ ret = nv50_fifo_channel_enable(dev, chan->id, false);
+ if (ret) {
+ NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nv50_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->cache);
+
+ nv50_fifo_channel_disable(dev, chan->id, false);
+
+ /* Dummy channel, also used on ch 127 */
+ if (chan->id == 0)
+ nv50_fifo_channel_disable(dev, 127, false);
+}
+
+int
+nv50_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+ struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+ int ptr, cnt;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
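+	/* Each 32-bit RAMFC slot maps one-for-one onto a PFIFO context
+	 * register; the pairing below mirrors nv50_fifo_unload_context().
+	 */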
+ nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
+ nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
+ nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
+ nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
+ nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
+ nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
+ nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
+ nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
+ nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
+ nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
+ nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
+ nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
+ nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
+ nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
+ nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
+ nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
+ nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
+ nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
+ nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
+ nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
+ nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
+ nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
+ nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
+ nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
+ nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
+ nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
+ nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
+ nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
+ nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
+ nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
+ nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
+ nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
+ nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
+
+ cnt = nv_ro32(dev, ramfc, 0x84/4);
+ for (ptr = 0; ptr < cnt; ptr++) {
+ nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
+ nv_ro32(dev, cache, (ptr * 2) + 0));
+ nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
+ nv_ro32(dev, cache, (ptr * 2) + 1));
+ }
+ nv_wr32(dev, 0x3210, cnt << 2);
+ nv_wr32(dev, 0x3270, 0);
+
+ /* guessing that all the 0x34xx regs aren't on NV50 */
+ if (!IS_G80) {
+ nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
+ nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
+ nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
+ nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
+ nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+ }
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
+ return 0;
+}
+
+int
+nv50_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_gpuobj *ramfc, *cache;
+ struct nouveau_channel *chan = NULL;
+ int chid, get, put, ptr;
+
+ NV_DEBUG(dev, "\n");
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+
+ chan = dev_priv->fifos[chid];
+ if (!chan) {
+ NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
+ return -EINVAL;
+ }
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+ ramfc = chan->ramfc->gpuobj;
+ cache = chan->cache->gpuobj;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+
+ nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
+ nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
+ nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
+ nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
+ nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
+ nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
+ nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
+ nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
+ nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
+ nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
+ nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
+ nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
+ nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
+ nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
+ nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
+ nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
+ nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
+ nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
+ nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
+ nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
+ nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
+ nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
+ nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
+ nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
+ nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
+ nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
+ nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
+ nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
+ nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
+ nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
+ nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
+ nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
+ nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+
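+	/* Drain whatever method/data pairs are still queued in CACHE1 into
+	 * the channel's cache object, to be replayed on the next load.
+	 */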
+ put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
+ get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
+ ptr = 0;
+ while (put != get) {
+ nv_wo32(dev, cache, ptr++,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
+ nv_wo32(dev, cache, ptr++,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
+ get = (get + 1) & 0x1ff;
+ }
+
+ /* guessing that all the 0x34xx regs aren't on NV50 */
+ if (!IS_G80) {
+ nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
+ nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
+ nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
+ nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
+ nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
+ nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+ }
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+	/* XXX: probably reload ch127 (NULL) state back too */
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 00000000000..177d8229336
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+MODULE_FIRMWARE("nouveau/nv50.ctxprog");
+MODULE_FIRMWARE("nouveau/nv50.ctxvals");
+MODULE_FIRMWARE("nouveau/nv84.ctxprog");
+MODULE_FIRMWARE("nouveau/nv84.ctxvals");
+MODULE_FIRMWARE("nouveau/nv86.ctxprog");
+MODULE_FIRMWARE("nouveau/nv86.ctxvals");
+MODULE_FIRMWARE("nouveau/nv92.ctxprog");
+MODULE_FIRMWARE("nouveau/nv92.ctxvals");
+MODULE_FIRMWARE("nouveau/nv94.ctxprog");
+MODULE_FIRMWARE("nouveau/nv94.ctxvals");
+MODULE_FIRMWARE("nouveau/nv96.ctxprog");
+MODULE_FIRMWARE("nouveau/nv96.ctxvals");
+MODULE_FIRMWARE("nouveau/nv98.ctxprog");
+MODULE_FIRMWARE("nouveau/nv98.ctxvals");
+MODULE_FIRMWARE("nouveau/nva0.ctxprog");
+MODULE_FIRMWARE("nouveau/nva0.ctxvals");
+MODULE_FIRMWARE("nouveau/nva5.ctxprog");
+MODULE_FIRMWARE("nouveau/nva5.ctxvals");
+MODULE_FIRMWARE("nouveau/nva8.ctxprog");
+MODULE_FIRMWARE("nouveau/nva8.ctxvals");
+MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
+MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
+MODULE_FIRMWARE("nouveau/nvac.ctxprog");
+MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_graph_init_reset(struct drm_device *dev)
+{
+ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_graph_init_intr(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
+}
+
+static void
+nv50_graph_init_regs__nv(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x401804, 0xc0000000);
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+
+ nv_wr32(dev, 0x400824, 0x00004000);
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static void
+nv50_graph_init_regs(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
+ (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
+ nv_wr32(dev, 0x402ca8, 0x800);
+}
+
+static int
+nv50_graph_init_ctxctl(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv40_grctx_init(dev);
+
+ nv_wr32(dev, 0x400320, 4);
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
+ return 0;
+}
+
+int
+nv50_graph_init(struct drm_device *dev)
+{
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ nv50_graph_init_reset(dev);
+ nv50_graph_init_regs__nv(dev);
+ nv50_graph_init_regs(dev);
+ nv50_graph_init_intr(dev);
+
+ ret = nv50_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nv50_graph_takedown(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+ nv40_grctx_fini(dev);
+}
+
+void
+nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+ const uint32_t mask = 0x00010001;
+
+ if (enabled)
+ nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
+ else
+ nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
+}
+
+struct nouveau_channel *
+nv50_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+ return NULL;
+ inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->ramin && chan->ramin->instance == inst)
+ return chan;
+ }
+
+ return NULL;
+}
+
+int
+nv50_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_gpuobj *ctx;
+ uint32_t grctx_size = 0x70000;
+ int hdr, ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+ if (ret)
+ return ret;
+ ctx = chan->ramin_grctx->gpuobj;
+
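+	/* Write a DMA-object-like header into the channel's RAMIN so PGRAPH
+	 * can find the context: flags word 0x00190002, then the context's
+	 * end and start addresses.
+	 */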
+ hdr = IS_G80 ? 0x200 : 0x20;
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
+ nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
+ grctx_size - 1);
+ nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
+ nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
+ nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
+ nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv40_grctx_vals_load(dev, ctx);
+ nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
+ if ((dev_priv->chipset & 0xf0) == 0xa0)
+ nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
+ else
+ nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ return 0;
+}
+
+void
+nv50_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, hdr = IS_G80 ? 0x200 : 0x20;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ if (!chan->ramin || !chan->ramin->gpuobj)
+ return;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = hdr; i < hdr + 24; i += 4)
+ nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+}
+
+static int
+nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
+{
+ uint32_t fifo = nv_rd32(dev, 0x400500);
+
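+	/* Stall PFIFO's access to PGRAPH, point the hardware at the new
+	 * context instance and pulse the transfer; 0x40032c with bit 31 set
+	 * appears to latch the now-current context.
+	 */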
+ nv_wr32(dev, 0x400500, fifo & ~1);
+ nv_wr32(dev, 0x400784, inst);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
+ nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
+ nv_wr32(dev, 0x400040, 0xffffffff);
+ (void)nv_rd32(dev, 0x400040);
+ nv_wr32(dev, 0x400040, 0x00000000);
+ nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
+
+ if (nouveau_wait_for_idle(dev))
+ nv_wr32(dev, 0x40032c, inst | (1<<31));
+ nv_wr32(dev, 0x400500, fifo);
+
+ return 0;
+}
+
+int
+nv50_graph_load_context(struct nouveau_channel *chan)
+{
+ uint32_t inst = chan->ramin->instance >> 12;
+
+ NV_DEBUG(chan->dev, "ch%d\n", chan->id);
+ return nv50_graph_do_load_context(chan->dev, inst);
+}
+
+int
+nv50_graph_unload_context(struct drm_device *dev)
+{
+ uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+
+ inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+ return 0;
+ inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+ nv_wr32(dev, 0x400500, fifo & ~1);
+ nv_wr32(dev, 0x400784, inst);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
+ nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
+ nouveau_wait_for_idle(dev);
+ nv_wr32(dev, 0x400500, fifo);
+
+ nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
+ return 0;
+}
+
+void
+nv50_graph_context_switch(struct drm_device *dev)
+{
+ uint32_t inst;
+
+ nv50_graph_unload_context(dev);
+
+ inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
+ inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
+ nv50_graph_do_load_context(dev, inst);
+
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
+}
+
+static int
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct nouveau_gpuobj_ref *ref = NULL;
+
+ if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ return -ENOENT;
+
+ if (nouveau_notifier_offset(ref->gpuobj, NULL))
+ return -EINVAL;
+
+ chan->nvsw.vblsem = ref->gpuobj;
+ chan->nvsw.vblsem_offset = ~0;
+ return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
+ return -ERANGE;
+
+ chan->nvsw.vblsem_offset = data >> 2;
+ return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ chan->nvsw.vblsem_rval = data;
+ return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
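+	/* Make sure the vblank interrupt for the requested CRTC is enabled,
+	 * then park this channel on the wait list; the display IRQ handler
+	 * releases the semaphore by writing vblsem_rval into the notifier.
+	 */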
+ if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
+ return -EINVAL;
+
+ if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
+ NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1,
+ NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
+ NV50_PDISPLAY_INTR_EN) |
+ NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
+ }
+
+ list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+ return 0;
+}
+
+static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
+ { 0x018c, nv50_graph_nvsw_dma_vblsem },
+ { 0x0400, nv50_graph_nvsw_vblsem_offset },
+ { 0x0404, nv50_graph_nvsw_vblsem_release_val },
+ { 0x0408, nv50_graph_nvsw_vblsem_release },
+ {}
+};
+
+struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
+ { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
+ { 0x0030, false, NULL }, /* null */
+ { 0x5039, false, NULL }, /* m2mf */
+ { 0x502d, false, NULL }, /* 2d */
+ { 0x50c0, false, NULL }, /* compute */
+ { 0x5097, false, NULL }, /* tesla (nv50) */
+ { 0x8297, false, NULL }, /* tesla (nv80/nv90) */
+ { 0x8397, false, NULL }, /* tesla (nva0) */
+ { 0x8597, false, NULL }, /* tesla (nva8) */
+ {}
+};
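
The NVSW method table above maps method offsets (0x018c, 0x0400, ...) to software handlers, terminated by a zeroed entry. A dispatcher over such a table would look roughly like the sketch below; the field names (mthd, exec) are inferred from the initializers, not declared in this patch:

    static int
    nv50_graph_nvsw_exec(struct nouveau_channel *chan, int grclass,
                         int mthd, uint32_t data)
    {
            struct nouveau_pgraph_object_method *m;

            for (m = nv50_graph_nvsw_methods; m->exec; m++) {
                    if (m->mthd == mthd)
                            return m->exec(chan, grclass, mthd, data);
            }
            return -ENOENT;  /* no software handler for this method */
    }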
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 00000000000..94400f777e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_instmem_priv {
+ uint32_t save1700[5]; /* 0x1700->0x1710 */
+
+ struct nouveau_gpuobj_ref *pramin_pt;
+ struct nouveau_gpuobj_ref *pramin_bar;
+ struct nouveau_gpuobj_ref *fb_bar;
+
+ bool last_access_wr;
+};
+
+#define NV50_INSTMEM_PAGE_SHIFT 12
+#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
+#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
+
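NV50_INSTMEM_PT_SIZE reserves one 8-byte page-table entry per 4 KiB page of the aperture; for instance:

    /* a 16 MiB PRAMIN aperture: ((16 << 20) >> 12) << 3 = 32 KiB of PTEs */
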
+/* NOTE: assumes 0x1700 already covers the correct MiB of PRAMIN */
+#define BAR0_WI32(g, o, v) do { \
+ uint32_t offset; \
+ if ((g)->im_backing) { \
+ offset = (g)->im_backing_start; \
+ } else { \
+ offset = chan->ramin->gpuobj->im_backing_start; \
+ offset += (g)->im_pramin->start; \
+ } \
+ offset += (o); \
+ nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
+} while (0)
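
BAR0_WI32 resolves an object's VRAM offset (either its own backing start, or the fake channel's backing plus the object's PRAMIN offset) and writes through the 1 MiB BAR0 window. A read counterpart would mirror the same address computation; this is a sketch for illustration, not part of the patch:

    #define BAR0_RI32(g, o) ({ \
            uint32_t _offset; \
            if ((g)->im_backing) \
                    _offset = (g)->im_backing_start; \
            else { \
                    _offset = chan->ramin->gpuobj->im_backing_start; \
                    _offset += (g)->im_pramin->start; \
            } \
            nv_rd32(dev, NV_RAMIN + ((_offset + (o)) & 0xfffff)); \
    })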
+
+int
+nv50_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
+ struct nv50_instmem_priv *priv;
+ int ret, i;
+ uint32_t v, save_nv001700;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.instmem.priv = priv;
+
+ /* Save state, will restore at takedown. */
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+
+ /* Reserve the last MiB of VRAM; we should probably try to avoid
+ * setting up the tables below over the top of the VBIOS image at
+ * some point.
+ */
+ dev_priv->ramin_rsvd_vram = 1 << 20;
+ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
+ c_size = 128 << 10;
+ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
+ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
+ c_base = c_vmpd + 0x4000;
+ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
+
+ NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
+ NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
+ (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
+ NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
+ NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
+
+ /* Determine VM layout, we need to do this first to make sure
+ * we allocate enough memory for all the page tables.
+ */
+ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
+ dev_priv->vm_gart_size = NV50_VM_BLOCK;
+
+ dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+ dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
+ if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
+ dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
+ dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
+ dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
+
+ dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
+
+ NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
+ dev_priv->vm_gart_base,
+ dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
+ NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
+ dev_priv->vm_vram_base,
+ dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
+
+ c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
+
+ /* Map BAR0 PRAMIN aperture over the memory we want to use */
+ save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
+ nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
+
+ /* Create a fake channel, and use it as our "dummy" channels 0/127.
+ * The main reason for creating a channel is so we can use the gpuobj
+ * code. However, it's worth noting that NVIDIA also set up their
+ * channels 0/127 with the same values configured here, so there may
+ * be some other reason for doing this.
+ *
+ * We have to create the entire channel manually, as the real channel
+ * creation code assumes we have PRAMIN access, and we don't until
+ * we're done here.
+ */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->id = 0;
+ chan->dev = dev;
+ chan->file_priv = (struct drm_file *)-2;
+ dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+
+ /* Channel's PRAMIN object + heap */
+ ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
+ NULL, &chan->ramin);
+ if (ret)
+ return ret;
+
+ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+ return -ENOMEM;
+
+ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
+ ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
+ 0x4000, 0, NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < c_vmpd; i += 4)
+ BAR0_WI32(chan->ramin->gpuobj, i, 0);
+
+ /* VM page directory */
+ ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
+ 0x4000, 0, &chan->vm_pd, NULL);
+ if (ret)
+ return ret;
+ for (i = 0; i < 0x4000; i += 8) {
+ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
+ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
+ }
+
+ /* PRAMIN page table; cheat and map it into the VM at 0x0000000000.
+ * We map the entire fake channel into the start of the PRAMIN BAR.
+ */
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
+ 0, &priv->pramin_pt);
+ if (ret)
+ return ret;
+
+ for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
+ if (v < (c_offset + c_size))
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
+ else
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ }
+
+ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
+ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+
+ /* VRAM page table(s), mapped into VM at +1GiB */
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
+ NV50_VM_BLOCK/65536*8, 0, 0,
+ &chan->vm_vram_pt[i]);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
+ ret);
+ dev_priv->vm_vram_pt_nr = i;
+ return ret;
+ }
+ dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
+
+ for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
+ v += 4)
+ BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
+
+ BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
+ chan->vm_vram_pt[i]->instance | 0x61);
+ BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
+ }
+
+ /* DMA object for PRAMIN BAR */
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+ &priv->pramin_bar);
+ if (ret)
+ return ret;
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
+
+ /* DMA object for FB BAR */
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+ &priv->fb_bar);
+ if (ret)
+ return ret;
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
+ drm_get_resource_len(dev, 1) - 1);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
+
+ /* Poke the relevant regs, and pray it works :) */
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+ NV50_PUNK_BAR1_CTXDMA_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ /* Assume that praying isn't enough; check that we can re-read the
+ * entire fake channel back from the PRAMIN BAR. */
+ dev_priv->engine.instmem.prepare_access(dev, false);
+ for (i = 0; i < c_size; i += 4) {
+ if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
+ NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
+ i);
+ dev_priv->engine.instmem.finish_access(dev);
+ return -EINVAL;
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
+
+ /* Global PRAMIN heap */
+ if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ c_size, dev_priv->ramin_size - c_size)) {
+ dev_priv->ramin_heap = NULL;
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ }
+
+ /* XXX: incorrect, but needed to make the hash function "work" */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ return 0;
+}
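
For orientation, the address space laid out above is linear: block 0 holds the PRAMIN mappings (the page table created here is pointed at VM address 0), the GART occupies the next block, and VRAM follows, rounded up to whole blocks. Assuming NV50_VM_BLOCK is 512 MiB (the constant is defined elsewhere; treat the number as an assumption), a 256 MiB board would end up with:

    /* hypothetical layout, NV50_VM_BLOCK = 512 MiB, 256 MiB of VRAM:
     *   0x0000000000-0x001fffffff  PRAMIN / misc (block 0)
     *   0x0020000000-0x003fffffff  GART  (vm_gart_base)
     *   0x0040000000-0x005fffffff  VRAM  (vm_vram_base, one block after rounding)
     */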
+
+void
+nv50_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!priv)
+ return;
+
+ /* Restore state from before init */
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
+
+ nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+
+ /* Destroy dummy channel */
+ if (chan) {
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+ dev_priv->vm_vram_pt[i] = NULL;
+ }
+ dev_priv->vm_vram_pt_nr = 0;
+
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ nouveau_mem_takedown(&chan->ramin_heap);
+
+ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
+ kfree(chan);
+ }
+
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
+}
+
+int
+nv50_instmem_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ int i;
+
+ ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
+ if (!ramin->im_backing_suspend)
+ return -ENOMEM;
+
+ for (i = 0; i < ramin->im_pramin->size; i += 4)
+ ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+ return 0;
+}
+
+void
+nv50_instmem_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ int i;
+
+ nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
+ for (i = 0; i < ramin->im_pramin->size; i += 4)
+ BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
+ vfree(ramin->im_backing_suspend);
+ ramin->im_backing_suspend = NULL;
+
+ /* Poke the relevant regs, and pray it works :) */
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+ NV50_PUNK_BAR1_CTXDMA_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x1900 + (i*4), 0);
+}
+
+int
+nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ uint32_t *sz)
+{
+ int ret;
+
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+ if (*sz == 0)
+ return -EINVAL;
+
+ ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
+ true, false, &gpuobj->im_backing);
+ if (ret) {
+ NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ return ret;
+ }
+
+ gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
+ gpuobj->im_backing_start <<= PAGE_SHIFT;
+
+ return 0;
+}
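
The open-coded round-up of *sz is the standard power-of-two alignment idiom; it matches the kernel's ALIGN() macro, shown here purely as a cross-check, not a proposed change:

    /* (*sz + (PAGE - 1)) & ~(PAGE - 1) is equivalent to: */
    *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);   /* e.g. 0x1234 -> 0x2000 */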
+
+void
+nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->engine.instmem.unbind(dev, gpuobj);
+ nouveau_bo_unpin(gpuobj->im_backing);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ uint32_t pte, pte_end, vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ vram = gpuobj->im_backing_start;
+
+ NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pte < pte_end) {
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+
+ pte += 8;
+ vram += NV50_INSTMEM_PAGE_SIZE;
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00040001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00060001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
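
bind and unbind write the same 8-byte PTE shape in their loops: a low word carrying the page address with bit 0 set when present (vram | 1), or the 0x00000009 "unmapped" pattern, plus a zero high word. The shared step, as a sketch (the flag meanings are inferred from the two loops, not documented by the patch):

    static void
    pramin_pte_write(struct drm_device *dev, struct nouveau_gpuobj *pt,
                     uint32_t pte, uint32_t lo)
    {
            nv_wo32(dev, pt, (pte + 0) / 4, lo);          /* address | flags */
            nv_wo32(dev, pt, (pte + 4) / 4, 0x00000000);  /* high word unused here */
    }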
+
+int
+nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ uint32_t pte, pte_end;
+
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pte < pte_end) {
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+ pte += 8;
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
+void
+nv50_instmem_prepare_access(struct drm_device *dev, bool write)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
+ priv->last_access_wr = write;
+}
+
+void
+nv50_instmem_finish_access(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
+ if (priv->last_access_wr) {
+ nv_wr32(dev, 0x070000, 0x00000001);
+ if (!nv_wait(0x070000, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 00000000000..e0a9c3faa20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nv50_mc_init(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+ return 0;
+}
+
+void nv50_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
new file mode 100644
index 00000000000..8c280463a66
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
+{
+ struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int ret;
+
+ NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while disconnecting SOR\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ OUT_RING(evo, 0);
+}
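
Submitting an EVO method always follows the trio seen here: RING_SPACE() to reserve words, BEGIN_RING() to emit a header naming the method and its data count, then OUT_RING() for each data word. A one-word convenience wrapper, purely illustrative (the helper name is invented):

    static int
    evo_write1(struct nouveau_channel *evo, uint32_t mthd, uint32_t data)
    {
            int ret = RING_SPACE(evo, 2);   /* header + one data word */

            if (ret)
                    return ret;
            BEGIN_RING(evo, 0, mthd, 1);
            OUT_RING(evo, data);
            return 0;
    }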
+
+static void
+nv50_sor_dp_link_train(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct bit_displayport_encoder_table *dpe;
+ int dpe_headerlen;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe) {
+ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
+ return;
+ }
+
+ if (dpe->script0) {
+ NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
+ nv_encoder->dcb);
+ }
+
+ if (!nouveau_dp_link_train(encoder))
+ NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
+
+ if (dpe->script1) {
+ NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
+ nv_encoder->dcb);
+ }
+}
+
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t val;
+ int or = nv_encoder->or;
+
+ NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+
+ /* wait for any previous DPMS request to complete */
+ if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+ NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
+ NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
+ NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
+ }
+
+ val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
+
+ if (mode == DRM_MODE_DPMS_ON)
+ val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
+ else
+ val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
+
+ nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
+ NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
+ if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
+ NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
+ NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
+ NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
+ nv50_sor_dp_link_train(encoder);
+}
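
Both timeout branches in this function rely on nv_wait(reg, mask, val) behaving as a bounded poll: keep reading until (value & mask) == val, returning false if the timeout expires first. A hedged sketch of that assumed contract, not this patch's definition:

    static bool
    nv_wait_sketch(struct drm_device *dev, uint32_t reg,
                   uint32_t mask, uint32_t val)
    {
            uint64_t timeout = 2000000;  /* iterations; the real helper uses PTIMER */

            while (timeout--) {
                    if ((nv_rd32(dev, reg) & mask) == val)
                            return true;
            }
            return false;  /* caller logs the stuck register, as above */
    }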
+
+static void
+nv50_sor_save(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static void
+nv50_sor_restore(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *connector;
+
+ NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+
+ connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!connector) {
+ NV_ERROR(encoder->dev, "Encoder has no connector\n");
+ return false;
+ }
+
+ if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+ connector->native_mode) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+
+ return true;
+}
+
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+ uint32_t mode_ctl = 0;
+ int ret;
+
+ NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ switch (nv_encoder->dcb->type) {
+ case OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (adjusted_mode->clock < 165000)
+ mode_ctl = 0x0100;
+ else
+ mode_ctl = 0x0500;
+ } else
+ mode_ctl = 0x0200;
+ break;
+ case OUTPUT_DP:
+ mode_ctl |= 0x00050000;
+ if (nv_encoder->dcb->sorconf.link & 1)
+ mode_ctl |= 0x00000800;
+ else
+ mode_ctl |= 0x00000900;
+ break;
+ default:
+ break;
+ }
+
+ if (crtc->index == 1)
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
+ else
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while connecting SOR\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ OUT_RING(evo, mode_ctl);
+}
+
+static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
+ .dpms = nv50_sor_dpms,
+ .save = nv50_sor_save,
+ .restore = nv50_sor_restore,
+ .mode_fixup = nv50_sor_mode_fixup,
+ .prepare = nv50_sor_prepare,
+ .commit = nv50_sor_commit,
+ .mode_set = nv50_sor_mode_set,
+ .detect = NULL
+};
+
+static void
+nv50_sor_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder;
+
+ if (!encoder)
+ return;
+ nv_encoder = nouveau_encoder(encoder);
+
+ NV_DEBUG(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
+ .destroy = nv50_sor_destroy,
+};
+
+int
+nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_encoder *encoder;
+ bool dum;
+ int type;
+
+ NV_DEBUG(dev, "\n");
+
+ switch (entry->type) {
+ case OUTPUT_TMDS:
+ NV_INFO(dev, "Detected a TMDS output\n");
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case OUTPUT_LVDS:
+ NV_INFO(dev, "Detected a LVDS output\n");
+ type = DRM_MODE_ENCODER_LVDS;
+
+ if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
+ NV_ERROR(dev, "Failed parsing LVDS table\n");
+ return -EINVAL;
+ }
+ break;
+ case OUTPUT_DP:
+ NV_INFO(dev, "Detected a DP output\n");
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ nv_encoder->disconnect = nv50_sor_disconnect;
+
+ drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
+ drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
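
The DCB entry stores the output resource as a one-hot mask; ffs() (find first set, 1-based) converts it to the OR index used throughout this file:

    /* entry->or = 0x1 -> SOR 0, 0x2 -> SOR 1, 0x4 -> SOR 2 */
    int or = ffs(0x4) - 1;   /* ffs(0x4) == 3, so or == 2 */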
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
new file mode 100644
index 00000000000..5998c35237b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -0,0 +1,535 @@
+/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
+/*
+ * Copyright 1996-1997 David J. McKay
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
+
+#ifndef __NVREG_H_
+#define __NVREG_H_
+
+#define NV_PMC_OFFSET 0x00000000
+#define NV_PMC_SIZE 0x00001000
+
+#define NV_PBUS_OFFSET 0x00001000
+#define NV_PBUS_SIZE 0x00001000
+
+#define NV_PFIFO_OFFSET 0x00002000
+#define NV_PFIFO_SIZE 0x00002000
+
+#define NV_HDIAG_OFFSET 0x00005000
+#define NV_HDIAG_SIZE 0x00001000
+
+#define NV_PRAM_OFFSET 0x00006000
+#define NV_PRAM_SIZE 0x00001000
+
+#define NV_PVIDEO_OFFSET 0x00008000
+#define NV_PVIDEO_SIZE 0x00001000
+
+#define NV_PTIMER_OFFSET 0x00009000
+#define NV_PTIMER_SIZE 0x00001000
+
+#define NV_PPM_OFFSET 0x0000A000
+#define NV_PPM_SIZE 0x00001000
+
+#define NV_PTV_OFFSET 0x0000D000
+#define NV_PTV_SIZE 0x00001000
+
+#define NV_PRMVGA_OFFSET 0x000A0000
+#define NV_PRMVGA_SIZE 0x00020000
+
+#define NV_PRMVIO0_OFFSET 0x000C0000
+#define NV_PRMVIO_SIZE 0x00002000
+#define NV_PRMVIO1_OFFSET 0x000C2000
+
+#define NV_PFB_OFFSET 0x00100000
+#define NV_PFB_SIZE 0x00001000
+
+#define NV_PEXTDEV_OFFSET 0x00101000
+#define NV_PEXTDEV_SIZE 0x00001000
+
+#define NV_PME_OFFSET 0x00200000
+#define NV_PME_SIZE 0x00001000
+
+#define NV_PROM_OFFSET 0x00300000
+#define NV_PROM_SIZE 0x00010000
+
+#define NV_PGRAPH_OFFSET 0x00400000
+#define NV_PGRAPH_SIZE 0x00010000
+
+#define NV_PCRTC0_OFFSET 0x00600000
+#define NV_PCRTC0_SIZE 0x00002000 /* empirical */
+
+#define NV_PRMCIO0_OFFSET 0x00601000
+#define NV_PRMCIO_SIZE 0x00002000
+#define NV_PRMCIO1_OFFSET 0x00603000
+
+#define NV50_DISPLAY_OFFSET 0x00610000
+#define NV50_DISPLAY_SIZE 0x0000FFFF
+
+#define NV_PRAMDAC0_OFFSET 0x00680000
+#define NV_PRAMDAC0_SIZE 0x00002000
+
+#define NV_PRMDIO0_OFFSET 0x00681000
+#define NV_PRMDIO_SIZE 0x00002000
+#define NV_PRMDIO1_OFFSET 0x00683000
+
+#define NV_PRAMIN_OFFSET 0x00700000
+#define NV_PRAMIN_SIZE 0x00100000
+
+#define NV_FIFO_OFFSET 0x00800000
+#define NV_FIFO_SIZE 0x00800000
+
+#define NV_PMC_BOOT_0 0x00000000
+#define NV_PMC_ENABLE 0x00000200
+
+#define NV_VIO_VSE2 0x000003c3
+#define NV_VIO_SRX 0x000003c4
+
+#define NV_CIO_CRX__COLOR 0x000003d4
+#define NV_CIO_CR__COLOR 0x000003d5
+
+#define NV_PBUS_DEBUG_1 0x00001084
+#define NV_PBUS_DEBUG_4 0x00001098
+#define NV_PBUS_DEBUG_DUALHEAD_CTL 0x000010f0
+#define NV_PBUS_POWERCTRL_1 0x00001584
+#define NV_PBUS_POWERCTRL_2 0x00001588
+#define NV_PBUS_POWERCTRL_4 0x00001590
+#define NV_PBUS_PCI_NV_19 0x0000184C
+#define NV_PBUS_PCI_NV_20 0x00001850
+# define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
+# define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
+
+#define NV_PFIFO_RAMHT 0x00002210
+
+#define NV_PTV_TV_INDEX 0x0000d220
+#define NV_PTV_TV_DATA 0x0000d224
+#define NV_PTV_HFILTER 0x0000d310
+#define NV_PTV_HFILTER2 0x0000d390
+#define NV_PTV_VFILTER 0x0000d510
+
+#define NV_PRMVIO_MISC__WRITE 0x000c03c2
+#define NV_PRMVIO_SRX 0x000c03c4
+#define NV_PRMVIO_SR 0x000c03c5
+# define NV_VIO_SR_RESET_INDEX 0x00
+# define NV_VIO_SR_CLOCK_INDEX 0x01
+# define NV_VIO_SR_PLANE_MASK_INDEX 0x02
+# define NV_VIO_SR_CHAR_MAP_INDEX 0x03
+# define NV_VIO_SR_MEM_MODE_INDEX 0x04
+#define NV_PRMVIO_MISC__READ 0x000c03cc
+#define NV_PRMVIO_GRX 0x000c03ce
+#define NV_PRMVIO_GX 0x000c03cf
+# define NV_VIO_GX_SR_INDEX 0x00
+# define NV_VIO_GX_SREN_INDEX 0x01
+# define NV_VIO_GX_CCOMP_INDEX 0x02
+# define NV_VIO_GX_ROP_INDEX 0x03
+# define NV_VIO_GX_READ_MAP_INDEX 0x04
+# define NV_VIO_GX_MODE_INDEX 0x05
+# define NV_VIO_GX_MISC_INDEX 0x06
+# define NV_VIO_GX_DONT_CARE_INDEX 0x07
+# define NV_VIO_GX_BIT_MASK_INDEX 0x08
+
+#define NV_PFB_BOOT_0 0x00100000
+#define NV_PFB_CFG0 0x00100200
+#define NV_PFB_CFG1 0x00100204
+#define NV_PFB_CSTATUS 0x0010020C
+#define NV_PFB_REFCTRL 0x00100210
+# define NV_PFB_REFCTRL_VALID_1 (1 << 31)
+#define NV_PFB_PAD 0x0010021C
+# define NV_PFB_PAD_CKE_NORMAL (1 << 0)
+#define NV_PFB_TILE_NV10 0x00100240
+#define NV_PFB_TILE_SIZE_NV10 0x00100244
+#define NV_PFB_REF 0x001002D0
+# define NV_PFB_REF_CMD_REFRESH (1 << 0)
+#define NV_PFB_PRE 0x001002D4
+# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV_PFB_CLOSE_PAGE2 0x0010033C
+#define NV_PFB_TILE_NV40 0x00100600
+#define NV_PFB_TILE_SIZE_NV40 0x00100604
+
+#define NV_PEXTDEV_BOOT_0 0x00101000
+# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
+#define NV_PEXTDEV_BOOT_3 0x0010100c
+
+#define NV_PCRTC_INTR_0 0x00600100
+# define NV_PCRTC_INTR_0_VBLANK (1 << 0)
+#define NV_PCRTC_INTR_EN_0 0x00600140
+#define NV_PCRTC_START 0x00600800
+#define NV_PCRTC_CONFIG 0x00600804
+# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
+# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
+#define NV_PCRTC_CURSOR_CONFIG 0x00600810
+# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
+# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
+# define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM (1 << 8)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32 (1 << 12)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 (1 << 16)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32 (2 << 24)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 (4 << 24)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA (1 << 28)
+
+/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
+#define NV_PCRTC_GPIO 0x00600818
+#define NV_PCRTC_GPIO_EXT 0x0060081c
+#define NV_PCRTC_830 0x00600830
+#define NV_PCRTC_834 0x00600834
+#define NV_PCRTC_850 0x00600850
+#define NV_PCRTC_ENGINE_CTRL 0x00600860
+# define NV_CRTC_FSEL_I2C (1 << 4)
+# define NV_CRTC_FSEL_OVERLAY (1 << 12)
+
+#define NV_PRMCIO_ARX 0x006013c0
+#define NV_PRMCIO_AR__WRITE 0x006013c0
+#define NV_PRMCIO_AR__READ 0x006013c1
+# define NV_CIO_AR_MODE_INDEX 0x10
+# define NV_CIO_AR_OSCAN_INDEX 0x11
+# define NV_CIO_AR_PLANE_INDEX 0x12
+# define NV_CIO_AR_HPP_INDEX 0x13
+# define NV_CIO_AR_CSEL_INDEX 0x14
+#define NV_PRMCIO_INP0 0x006013c2
+#define NV_PRMCIO_CRX__COLOR 0x006013d4
+#define NV_PRMCIO_CR__COLOR 0x006013d5
+ /* Standard VGA CRTC registers */
+# define NV_CIO_CR_HDT_INDEX 0x00 /* horizontal display total */
+# define NV_CIO_CR_HDE_INDEX 0x01 /* horizontal display end */
+# define NV_CIO_CR_HBS_INDEX 0x02 /* horizontal blanking start */
+# define NV_CIO_CR_HBE_INDEX 0x03 /* horizontal blanking end */
+# define NV_CIO_CR_HBE_4_0 4:0
+# define NV_CIO_CR_HRS_INDEX 0x04 /* horizontal retrace start */
+# define NV_CIO_CR_HRE_INDEX 0x05 /* horizontal retrace end */
+# define NV_CIO_CR_HRE_4_0 4:0
+# define NV_CIO_CR_HRE_HBE_5 7:7
+# define NV_CIO_CR_VDT_INDEX 0x06 /* vertical display total */
+# define NV_CIO_CR_OVL_INDEX 0x07 /* overflow bits */
+# define NV_CIO_CR_OVL_VDT_8 0:0
+# define NV_CIO_CR_OVL_VDE_8 1:1
+# define NV_CIO_CR_OVL_VRS_8 2:2
+# define NV_CIO_CR_OVL_VBS_8 3:3
+# define NV_CIO_CR_OVL_VDT_9 5:5
+# define NV_CIO_CR_OVL_VDE_9 6:6
+# define NV_CIO_CR_OVL_VRS_9 7:7
+# define NV_CIO_CR_RSAL_INDEX 0x08 /* normally "preset row scan" */
+# define NV_CIO_CR_CELL_HT_INDEX 0x09 /* cell height?! normally "max scan line" */
+# define NV_CIO_CR_CELL_HT_VBS_9 5:5
+# define NV_CIO_CR_CELL_HT_SCANDBL 7:7
+# define NV_CIO_CR_CURS_ST_INDEX 0x0a /* cursor start */
+# define NV_CIO_CR_CURS_END_INDEX 0x0b /* cursor end */
+# define NV_CIO_CR_SA_HI_INDEX 0x0c /* screen start address high */
+# define NV_CIO_CR_SA_LO_INDEX 0x0d /* screen start address low */
+# define NV_CIO_CR_TCOFF_HI_INDEX 0x0e /* cursor offset high */
+# define NV_CIO_CR_TCOFF_LO_INDEX 0x0f /* cursor offset low */
+# define NV_CIO_CR_VRS_INDEX 0x10 /* vertical retrace start */
+# define NV_CIO_CR_VRE_INDEX 0x11 /* vertical retrace end */
+# define NV_CIO_CR_VRE_3_0 3:0
+# define NV_CIO_CR_VDE_INDEX 0x12 /* vertical display end */
+# define NV_CIO_CR_OFFSET_INDEX 0x13 /* sets screen pitch */
+# define NV_CIO_CR_ULINE_INDEX 0x14 /* underline location */
+# define NV_CIO_CR_VBS_INDEX 0x15 /* vertical blank start */
+# define NV_CIO_CR_VBE_INDEX 0x16 /* vertical blank end */
+# define NV_CIO_CR_MODE_INDEX 0x17 /* crtc mode control */
+# define NV_CIO_CR_LCOMP_INDEX 0x18 /* line compare */
+ /* Extended VGA CRTC registers */
+# define NV_CIO_CRE_RPC0_INDEX 0x19 /* repaint control 0 */
+# define NV_CIO_CRE_RPC0_OFFSET_10_8 7:5
+# define NV_CIO_CRE_RPC1_INDEX 0x1a /* repaint control 1 */
+# define NV_CIO_CRE_RPC1_LARGE 2:2
+# define NV_CIO_CRE_FF_INDEX 0x1b /* fifo control */
+# define NV_CIO_CRE_ENH_INDEX 0x1c /* enhanced? */
+# define NV_CIO_SR_LOCK_INDEX 0x1f /* crtc lock */
+# define NV_CIO_SR_UNLOCK_RW_VALUE 0x57
+# define NV_CIO_SR_LOCK_VALUE 0x99
+# define NV_CIO_CRE_FFLWM__INDEX 0x20 /* fifo low water mark */
+# define NV_CIO_CRE_21 0x21 /* vga shadow crtc lock */
+# define NV_CIO_CRE_LSR_INDEX 0x25 /* ? */
+# define NV_CIO_CRE_LSR_VDT_10 0:0
+# define NV_CIO_CRE_LSR_VDE_10 1:1
+# define NV_CIO_CRE_LSR_VRS_10 2:2
+# define NV_CIO_CRE_LSR_VBS_10 3:3
+# define NV_CIO_CRE_LSR_HBE_6 4:4
+# define NV_CIO_CR_ARX_INDEX 0x26 /* attribute index -- ro copy of 0x60.3c0 */
+# define NV_CIO_CRE_CHIP_ID_INDEX 0x27 /* chip revision */
+# define NV_CIO_CRE_PIXEL_INDEX 0x28
+# define NV_CIO_CRE_PIXEL_FORMAT 1:0
+# define NV_CIO_CRE_HEB__INDEX 0x2d /* horizontal extra bits? */
+# define NV_CIO_CRE_HEB_HDT_8 0:0
+# define NV_CIO_CRE_HEB_HDE_8 1:1
+# define NV_CIO_CRE_HEB_HBS_8 2:2
+# define NV_CIO_CRE_HEB_HRS_8 3:3
+# define NV_CIO_CRE_HEB_ILC_8 4:4
+# define NV_CIO_CRE_2E 0x2e /* some scratch or dummy reg to force writes to sink in */
+# define NV_CIO_CRE_HCUR_ADDR2_INDEX 0x2f /* cursor */
+# define NV_CIO_CRE_HCUR_ADDR0_INDEX 0x30 /* pixmap */
+# define NV_CIO_CRE_HCUR_ADDR0_ADR 6:0
+# define NV_CIO_CRE_HCUR_ASI 7:7
+# define NV_CIO_CRE_HCUR_ADDR1_INDEX 0x31 /* address */
+# define NV_CIO_CRE_HCUR_ADDR1_ENABLE 0:0
+# define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL 1:1
+# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
+# define NV_CIO_CRE_LCD__INDEX 0x33
+# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
+# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
+# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
+# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
+# define NV_CIO_CRE_SCRATCH3__INDEX 0x3b
+# define NV_CIO_CRE_SCRATCH4__INDEX 0x3c
+# define NV_CIO_CRE_DDC_STATUS__INDEX 0x3e
+# define NV_CIO_CRE_DDC_WR__INDEX 0x3f
+# define NV_CIO_CRE_EBR_INDEX 0x41 /* extra bits ? (vertical) */
+# define NV_CIO_CRE_EBR_VDT_11 0:0
+# define NV_CIO_CRE_EBR_VDE_11 2:2
+# define NV_CIO_CRE_EBR_VRS_11 4:4
+# define NV_CIO_CRE_EBR_VBS_11 6:6
+# define NV_CIO_CRE_43 0x43
+# define NV_CIO_CRE_44 0x44 /* head control */
+# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
+# define NV_CIO_CRE_RCR 0x46
+# define NV_CIO_CRE_RCR_ENDIAN_BIG 7:7
+# define NV_CIO_CRE_47 0x47 /* extended fifo lwm, used on nv30+ */
+# define NV_CIO_CRE_49 0x49
+# define NV_CIO_CRE_4B 0x4b /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
+# define NV_CIO_CRE_TVOUT_LATENCY 0x52
+# define NV_CIO_CRE_53 0x53 /* `fp_htiming' according to Haiku */
+# define NV_CIO_CRE_54 0x54 /* `fp_vtiming' according to Haiku */
+# define NV_CIO_CRE_57 0x57 /* index reg for cr58 */
+# define NV_CIO_CRE_58 0x58 /* data reg for cr57 */
+# define NV_CIO_CRE_59 0x59 /* related to on/off-chip-ness of digital outputs */
+# define NV_CIO_CRE_5B 0x5B /* newer colour saturation reg */
+# define NV_CIO_CRE_85 0x85
+# define NV_CIO_CRE_86 0x86
+#define NV_PRMCIO_INP0__COLOR 0x006013da
+
+#define NV_PRAMDAC_CU_START_POS 0x00680300
+# define NV_PRAMDAC_CU_START_POS_X 15:0
+# define NV_PRAMDAC_CU_START_POS_Y 31:16
+#define NV_RAMDAC_NV10_CURSYNC 0x00680404
+
+#define NV_PRAMDAC_NVPLL_COEFF 0x00680500
+#define NV_PRAMDAC_MPLL_COEFF 0x00680504
+#define NV_PRAMDAC_VPLL_COEFF 0x00680508
+# define NV30_RAMDAC_ENABLE_VCO2 (8 << 4)
+
+#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c
+# define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE (4 << 0)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL (1 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL (2 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL (4 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 (8 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 (1 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 (2 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 (4 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2 (8 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP (1 << 20)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2 (1 << 28)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2 (2 << 28)
+
+#define NV_PRAMDAC_PLL_SETUP_CONTROL 0x00680510
+#define NV_RAMDAC_VPLL2 0x00680520
+#define NV_PRAMDAC_SEL_CLK 0x00680524
+#define NV_RAMDAC_DITHER_NV11 0x00680528
+#define NV_PRAMDAC_DACCLK 0x0068052c
+# define NV_PRAMDAC_DACCLK_SEL_DACCLK (1 << 0)
+
+#define NV_RAMDAC_NVPLL_B 0x00680570
+#define NV_RAMDAC_MPLL_B 0x00680574
+#define NV_RAMDAC_VPLL_B 0x00680578
+#define NV_RAMDAC_VPLL2_B 0x0068057c
+# define NV31_RAMDAC_ENABLE_VCO2 (8 << 28)
+#define NV_PRAMDAC_580 0x00680580
+# define NV_RAMDAC_580_VPLL1_ACTIVE (1 << 8)
+# define NV_RAMDAC_580_VPLL2_ACTIVE (1 << 28)
+
+#define NV_PRAMDAC_GENERAL_CONTROL 0x00680600
+# define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON (3 << 4)
+# define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL (1 << 8)
+# define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL (1 << 12)
+# define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM (2 << 16)
+# define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS (1 << 20)
+# define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG (2 << 28)
+#define NV_PRAMDAC_TEST_CONTROL 0x00680608
+# define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED (1 << 12)
+# define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF (1 << 16)
+# define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI (1 << 28)
+#define NV_PRAMDAC_TESTPOINT_DATA 0x00680610
+# define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK (8 << 28)
+#define NV_PRAMDAC_630 0x00680630
+#define NV_PRAMDAC_634 0x00680634
+
+#define NV_PRAMDAC_TV_SETUP 0x00680700
+#define NV_PRAMDAC_TV_VTOTAL 0x00680720
+#define NV_PRAMDAC_TV_VSKEW 0x00680724
+#define NV_PRAMDAC_TV_VSYNC_DELAY 0x00680728
+#define NV_PRAMDAC_TV_HTOTAL 0x0068072c
+#define NV_PRAMDAC_TV_HSKEW 0x00680730
+#define NV_PRAMDAC_TV_HSYNC_DELAY 0x00680734
+#define NV_PRAMDAC_TV_HSYNC_DELAY2 0x00680738
+
+#define NV_PRAMDAC_FP_VDISPLAY_END 0x00680800
+#define NV_PRAMDAC_FP_VTOTAL 0x00680804
+#define NV_PRAMDAC_FP_VCRTC 0x00680808
+#define NV_PRAMDAC_FP_VSYNC_START 0x0068080c
+#define NV_PRAMDAC_FP_VSYNC_END 0x00680810
+#define NV_PRAMDAC_FP_VVALID_START 0x00680814
+#define NV_PRAMDAC_FP_VVALID_END 0x00680818
+#define NV_PRAMDAC_FP_HDISPLAY_END 0x00680820
+#define NV_PRAMDAC_FP_HTOTAL 0x00680824
+#define NV_PRAMDAC_FP_HCRTC 0x00680828
+#define NV_PRAMDAC_FP_HSYNC_START 0x0068082c
+#define NV_PRAMDAC_FP_HSYNC_END 0x00680830
+#define NV_PRAMDAC_FP_HVALID_START 0x00680834
+#define NV_PRAMDAC_FP_HVALID_END 0x00680838
+
+#define NV_RAMDAC_FP_DITHER 0x0068083c
+#define NV_PRAMDAC_FP_TG_CONTROL 0x00680848
+# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS (1 << 0)
+# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE (2 << 0)
+# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS (1 << 4)
+# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE (2 << 4)
+# define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE (0 << 8)
+# define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER (1 << 8)
+# define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE (2 << 8)
+# define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG (1 << 20)
+# define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 (1 << 24)
+# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS (1 << 28)
+# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28)
+#define NV_PRAMDAC_FP_MARGIN_COLOR 0x0068084c
+#define NV_PRAMDAC_850 0x00680850
+#define NV_PRAMDAC_85C 0x0068085c
+#define NV_PRAMDAC_FP_DEBUG_0 0x00680880
+# define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE (1 << 0)
+# define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE (1 << 4)
+/* This doesn't seem to be essential for TMDS, but it is still often set */
+# define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED (8 << 4)
+# define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR (1 << 8)
+# define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR (1 << 12)
+# define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND (1 << 20)
+# define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND (1 << 24)
+# define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK (1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_1 0x00680884
+# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE 11:0
+# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE (1 << 12)
+# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE 27:16
+# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE (1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_2 0x00680888
+#define NV_PRAMDAC_FP_DEBUG_3 0x0068088C
+
+/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
+#define NV_PRAMDAC_FP_TMDS_CONTROL 0x006808b0
+# define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE (1 << 16)
+#define NV_PRAMDAC_FP_TMDS_DATA 0x006808b4
+
+#define NV_PRAMDAC_8C0 0x006808c0
+
+/* Some kind of switch */
+#define NV_PRAMDAC_900 0x00680900
+#define NV_PRAMDAC_A20 0x00680A20
+#define NV_PRAMDAC_A24 0x00680A24
+#define NV_PRAMDAC_A34 0x00680A34
+
+#define NV_PRAMDAC_CTV 0x00680c00
+
+/* names fabricated from NV_USER_DAC info */
+#define NV_PRMDIO_PIXEL_MASK 0x006813c6
+# define NV_PRMDIO_PIXEL_MASK_MASK 0xff
+#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7
+#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8
+#define NV_PRMDIO_PALETTE_DATA 0x006813c9
+
+#define NV_PGRAPH_DEBUG_0 0x00400080
+#define NV_PGRAPH_DEBUG_1 0x00400084
+#define NV_PGRAPH_DEBUG_2_NV04 0x00400088
+#define NV_PGRAPH_DEBUG_2 0x00400620
+#define NV_PGRAPH_DEBUG_3 0x0040008c
+#define NV_PGRAPH_DEBUG_4 0x00400090
+#define NV_PGRAPH_INTR 0x00400100
+#define NV_PGRAPH_INTR_EN 0x00400140
+#define NV_PGRAPH_CTX_CONTROL 0x00400144
+#define NV_PGRAPH_CTX_CONTROL_NV04 0x00400170
+#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
+#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540
+#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544
+#define NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548
+#define NV_PGRAPH_BETA_AND 0x00400608
+#define NV_PGRAPH_LIMIT_VIOL_PIX 0x00400610
+#define NV_PGRAPH_BOFFSET0 0x00400640
+#define NV_PGRAPH_BOFFSET1 0x00400644
+#define NV_PGRAPH_BOFFSET2 0x00400648
+#define NV_PGRAPH_BLIMIT0 0x00400684
+#define NV_PGRAPH_BLIMIT1 0x00400688
+#define NV_PGRAPH_BLIMIT2 0x0040068c
+#define NV_PGRAPH_STATUS 0x00400700
+#define NV_PGRAPH_SURFACE 0x00400710
+#define NV_PGRAPH_STATE 0x00400714
+#define NV_PGRAPH_FIFO 0x00400720
+#define NV_PGRAPH_PATTERN_SHAPE 0x00400810
+#define NV_PGRAPH_TILE 0x00400b00
+
+#define NV_PVIDEO_INTR_EN 0x00008140
+#define NV_PVIDEO_BUFFER 0x00008700
+#define NV_PVIDEO_STOP 0x00008704
+#define NV_PVIDEO_UVPLANE_BASE(buff) (0x00008800+(buff)*4)
+#define NV_PVIDEO_UVPLANE_LIMIT(buff) (0x00008808+(buff)*4)
+#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4)
+#define NV_PVIDEO_BASE(buff) (0x00008900+(buff)*4)
+#define NV_PVIDEO_LIMIT(buff) (0x00008908+(buff)*4)
+#define NV_PVIDEO_LUMINANCE(buff) (0x00008910+(buff)*4)
+#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4)
+#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4)
+#define NV_PVIDEO_SIZE_IN(buff) (0x00008928+(buff)*4)
+#define NV_PVIDEO_POINT_IN(buff) (0x00008930+(buff)*4)
+#define NV_PVIDEO_DS_DX(buff) (0x00008938+(buff)*4)
+#define NV_PVIDEO_DT_DY(buff) (0x00008940+(buff)*4)
+#define NV_PVIDEO_POINT_OUT(buff) (0x00008948+(buff)*4)
+#define NV_PVIDEO_SIZE_OUT(buff) (0x00008950+(buff)*4)
+#define NV_PVIDEO_FORMAT(buff) (0x00008958+(buff)*4)
+# define NV_PVIDEO_FORMAT_PLANAR (1 << 0)
+# define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8 (1 << 16)
+# define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY (1 << 20)
+# define NV_PVIDEO_FORMAT_MATRIX_ITURBT709 (1 << 24)
+#define NV_PVIDEO_COLOR_KEY 0x00008B00
+
+/* NV04 overlay defines from VIDIX & Haiku */
+#define NV_PVIDEO_INTR_EN_0 0x00680140
+#define NV_PVIDEO_STEP_SIZE 0x00680200
+#define NV_PVIDEO_CONTROL_Y 0x00680204
+#define NV_PVIDEO_CONTROL_X 0x00680208
+#define NV_PVIDEO_BUFF0_START_ADDRESS 0x0068020c
+#define NV_PVIDEO_BUFF0_PITCH_LENGTH 0x00680214
+#define NV_PVIDEO_BUFF0_OFFSET 0x0068021c
+#define NV_PVIDEO_BUFF1_START_ADDRESS 0x00680210
+#define NV_PVIDEO_BUFF1_PITCH_LENGTH 0x00680218
+#define NV_PVIDEO_BUFF1_OFFSET 0x00680220
+#define NV_PVIDEO_OE_STATE 0x00680224
+#define NV_PVIDEO_SU_STATE 0x00680228
+#define NV_PVIDEO_RM_STATE 0x0068022c
+#define NV_PVIDEO_WINDOW_START 0x00680230
+#define NV_PVIDEO_WINDOW_SIZE 0x00680234
+#define NV_PVIDEO_FIFO_THRES_SIZE 0x00680238
+#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c
+#define NV_PVIDEO_KEY 0x00680240
+#define NV_PVIDEO_OVERLAY 0x00680244
+#define NV_PVIDEO_RED_CSC_OFFSET 0x00680280
+#define NV_PVIDEO_GREEN_CSC_OFFSET 0x00680284
+#define NV_PVIDEO_BLUE_CSC_OFFSET 0x00680288
+#define NV_PVIDEO_CSC_ADJUST 0x0068028c
+
+#endif
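
Many definitions above encode bitfields as bare high:low token pairs (e.g. NV_CIO_CR_HBE_4_0 is literally 4:0). The pair is meant to be split by C's conditional operator: (1 ? 4:0) evaluates to 4 and (0 ? 4:0) to 0. Helpers built on that idiom might look like this sketch (the names are invented; the driver's own helpers live elsewhere):

    #define BF_HI(f)     (1 ? f)   /* "(1 ? 4:0)" -> 4 */
    #define BF_LO(f)     (0 ? f)   /* "(0 ? 4:0)" -> 0 */
    #define BF_MASK(f)   ((0xffffffff >> (31 - BF_HI(f) + BF_LO(f))) << BF_LO(f))
    #define BF_GET(v, f) (((v) & BF_MASK(f)) >> BF_LO(f))

    /* BF_MASK(NV_CIO_CR_HBE_4_0) == 0x1f;
     * BF_MASK(NV_CIO_CRE_RPC0_OFFSET_10_8) == 0xe0 */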
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5713eedd6e..feb52eee431 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d67c42555ab..6578d19dff9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -263,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
+ val = gctx->scratch[((gctx->fb_base + idx) / 4)];
if (print)
DEBUG("FB[0x%02X]", idx);
- printk(KERN_INFO "FB access is not implemented.\n");
- return 0;
+ break;
case ATOM_ARG_IMM:
switch (align) {
case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
+ gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
DEBUG("FB[0x%02X]", idx);
- printk(KERN_INFO "FB access is not implemented.\n");
- return;
+ break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
@@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
*crev = CU8(idx + 3);
return;
}
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+ int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+ uint16_t data_offset;
+ int usage_bytes;
+ struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+ atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+ firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+ firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+
+ usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ if (usage_bytes == 0)
+ usage_bytes = 20 * 1024;
+ /* allocate some scratch memory */
+ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+ if (!ctx->scratch)
+ return -ENOMEM;
+ return 0;
+}
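
With the scratch buffer in place, the ATOM_ARG_FB hunks earlier in this patch read and write gctx->scratch[(gctx->fb_base + idx) / 4] instead of bailing out. A caller would allocate it once after building the context, roughly as below (the call site is an assumption, not shown in this diff):

    struct atom_context *ctx = rdev->mode_info.atom_context;
    int r = atom_allocate_fb_scratch(ctx);

    if (r)
            return r;   /* -ENOMEM if the firmware-requested area can't be allocated */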
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index e6eb38f2bca..6671848e5ea 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -132,6 +132,7 @@ struct atom_context {
uint8_t shift;
int cs_equal, cs_above;
int io_mode;
+ uint32_t *scratch;
};
extern int atom_debug;
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
#include "ObjectID.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index c11ddddfb3b..5f48515c77a 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1141,7 +1141,7 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
/* ucTableFormatRevision=1,ucTableContentRevision=2 */
typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx defintions below */
+ UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
UCHAR ucAction; /* 0: turn off encoder */
/* 1: setup and turn on encoder */
UCHAR ucTruncate; /* bit0=0: Disable truncate */
@@ -1424,7 +1424,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
/* Structures used in FirmwareInfoTable */
/****************************************************************************/
-/* usBIOSCapability Defintion: */
+/* usBIOSCapability Definition: */
/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
@@ -2386,7 +2386,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
} ATOM_ANALOG_TV_INFO_V1_2;
/**************************************************************************/
-/* VRAM usage and their defintions */
+/* VRAM usage and their definitions */
/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
typedef struct _ATOM_HPD_INT_RECORD {
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPluggged_PinState;
+ UCHAR ucPlugged_PinState;
} ATOM_HPD_INT_RECORD;
typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
@@ -3046,7 +3046,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
-/* Byte aligned defintion for BIOS usage */
+/* Byte aligned definition for BIOS usage */
#define ATOM_S0_CRT1_MONOb0 0x01
#define ATOM_S0_CRT1_COLORb0 0x02
#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3131,7 +3131,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
-/* Byte aligned defintion for BIOS usage */
+/* Byte aligned definition for BIOS usage */
#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
@@ -3190,7 +3190,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
-/* Byte aligned defintion for BIOS usage */
+/* Byte aligned definition for BIOS usage */
#define ATOM_S3_CRT1_ACTIVEb0 0x01
#define ATOM_S3_LCD1_ACTIVEb0 0x02
#define ATOM_S3_TV1_ACTIVEb0 0x04
@@ -3230,7 +3230,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
#define ATOM_S4_LCD1_REFRESH_SHIFT 8
-/* Byte aligned defintion for BIOS usage */
+/* Byte aligned definition for BIOS usage */
#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
@@ -3310,7 +3310,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
-/* Byte aligned defintion for BIOS usage */
+/* Byte aligned definition for BIOS usage */
#define ATOM_S6_DEVICE_CHANGEb0 0x01
#define ATOM_S6_SCALER_CHANGEb0 0x02
#define ATOM_S6_LID_CHANGEb0 0x04
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c15287a590f..260fcf59f00 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
atombios_blank_crtc(crtc, 0);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
atombios_enable_crtc(crtc, 0);
break;
}
-
- if (mode != DRM_MODE_DPMS_OFF) {
- radeon_crtc_load_lut(crtc);
- }
}
static void
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type !=
DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (!ASIC_IS_AVIVO(rdev)
- && (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS))
+ if (encoder->encoder_type ==
+ DRM_MODE_ENCODER_LVDS)
pll_flags |= RADEON_PLL_USE_REF_DIV;
}
radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
else
pll = &rdev->clock.p2pll;
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll)
+ radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ else
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ } else {
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ }
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
- struct drm_radeon_gem_object *obj_priv;
+ struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
- if (!crtc->fb)
- return -EINVAL;
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
+ /* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- obj_priv = obj->driver_private;
-
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
switch (crtc->fb->bits_per_pixel) {
case 8:
@@ -618,8 +638,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
@@ -674,7 +692,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 00000000000..0d63c4436e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "drm_dp_helper.h"
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_LINK_STATUS_SIZE 6
+#define DP_DPCD_SIZE 8
+
+static const char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static const char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+static const int dp_clocks[] = {
+ 54000, /* 1 lane, 1.62 GHz */
+ 90000, /* 1 lane, 2.70 GHz */
+ 108000, /* 2 lane, 1.62 GHz */
+ 180000, /* 2 lane, 2.70 GHz */
+ 216000, /* 4 lane, 1.62 GHz */
+ 360000, /* 4 lane, 2.70 GHz */
+};
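+/*
+ * Sanity check on the table above (editorial note, not in the original
+ * patch): one 1.62 GHz lane carries 1.62 Gbps, or 1.296 Gbps after
+ * 8b/10b coding; at 24 bpp that is 1.296e9 / 24 = 54 MHz of pixel
+ * clock, i.e. the first entry (mode clocks are in kHz). The remaining
+ * entries scale linearly with link rate and lane count.
+ */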
+
+static const int num_dp_clocks = ARRAY_SIZE(dp_clocks);
+
+/* common helper functions */
+static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int i;
+ u8 max_link_bw;
+ u8 max_lane_count;
+
+ if (!dpcd)
+ return 0;
+
+ max_link_bw = dpcd[DP_MAX_LINK_RATE];
+ max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ for (i = 0; i < num_dp_clocks; i++) {
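+ /* odd dp_clocks[] entries are the 2.70 GHz rates;
+ * a sink limited to 1.62 has to skip them */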
+ if (i % 2)
+ continue;
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock) {
+ if (i < 2)
+ return 1;
+ else if (i < 4)
+ return 2;
+ else
+ return 4;
+ }
+ }
+ break;
+ case DP_LINK_BW_2_7:
+ for (i = 0; i < num_dp_clocks; i++) {
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock) {
+ if (i < 2)
+ return 1;
+ else if (i < 4)
+ return 2;
+ else
+ return 4;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int i;
+ u8 max_link_bw;
+ u8 max_lane_count;
+
+ if (!dpcd)
+ return 0;
+
+ max_link_bw = dpcd[DP_MAX_LINK_RATE];
+ max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ for (i = 0; i < num_dp_clocks; i++) {
+ if (i % 2)
+ continue;
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock)
+ return 162000;
+ }
+ break;
+ case DP_LINK_BW_2_7:
+ for (i = 0; i < num_dp_clocks; i++) {
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock)
+ return (i % 2) ? 270000 : 162000;
+ }
+ }
+
+ return 0;
+}
+
+int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
+ int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
+
+ if ((lanes == 0) || (bw == 0))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
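+/*
+ * Illustration (editorial): lane 2's status sits in the low nibble of
+ * DP_LANE2_3_STATUS and lane 3's in the high nibble, so
+ * dp_get_lane_status(link_status, 3) returns bits 7:4 of that byte.
+ */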
+
+static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+/* XXX fix me -- chip specific */
+#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+static u8 dp_pre_emphasis_max(u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
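+/*
+ * Editorial note: the table above encodes the usual swing/pre-emphasis
+ * trade-off; the higher the requested voltage swing, the less
+ * pre-emphasis headroom remains, down to none at the 1.2V maximum.
+ */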
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ u8 train_set[4])
+{
+ u8 v = 0;
+ u8 p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+ lane,
+ voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= DP_VOLTAGE_MAX)
+ v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= dp_pre_emphasis_max(v))
+ p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+ voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
+
+/* radeon aux chan functions */
+bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
+ int num_bytes, u8 *read_byte,
+ u8 read_buf_len, u8 delay)
+{
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+ unsigned char *base;
+
+ memset(&args, 0, sizeof(args));
+
+ base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+ memcpy(base, req_bytes, num_bytes);
+
+ args.lpAuxRequest = 0;
+ args.lpDataOut = 16;
+ args.ucDataOutLen = 0;
+ args.ucChannelID = chan->rec.i2c_id;
+ args.ucDelay = delay / 10;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ if (args.ucReplyStatus) {
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
+ chan->rec.i2c_id, args.ucReplyStatus);
+ return false;
+ }
+
+ if (args.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.ucDataOutLen) {
+ DRM_ERROR("Buffer to small for return answer %d %d\n",
+ read_buf_len, args.ucDataOutLen);
+ return false;
+ }
+ {
+ int len = min(read_buf_len, args.ucDataOutLen);
+ memcpy(read_byte, base + 16, len);
+ }
+ }
+ return true;
+}
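+/*
+ * Editorial note: the native AUX helpers below build the 4-byte request
+ * header -- address low byte, address high byte, command nibble in the
+ * top half of byte 2, and a length byte -- then hand the message to
+ * radeon_process_aux_ch() above.
+ */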
+
+bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
+ uint8_t send_bytes, uint8_t *send)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[20];
+ u8 msg_len, dp_msg_len;
+ bool ret;
+
+ dp_msg_len = 4;
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_WRITE << 4;
+ dp_msg_len += send_bytes;
+ msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
+
+ if (send_bytes > 16)
+ return false;
+
+ memcpy(&msg[4], send, send_bytes);
+ msg_len = 4 + send_bytes;
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
+ return ret;
+}
+
+bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
+ uint8_t delay, uint8_t expected_bytes,
+ uint8_t *read_p)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[20];
+ u8 msg_len, dp_msg_len;
+ bool ret = false;
+
+ msg_len = 4;
+ dp_msg_len = 4;
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_READ << 4;
+ msg[3] = (dp_msg_len) << 4;
+ msg[3] |= expected_bytes - 1;
+
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
+ return ret;
+}
+
+/* radeon dp functions */
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
+ uint8_t ucconfig, uint8_t lane_num)
+{
+ DP_ENCODER_SERVICE_PARAMETERS args;
+ int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+ memset(&args, 0, sizeof(args));
+ args.ucLinkClock = dp_clock / 10;
+ args.ucConfig = ucconfig;
+ args.ucAction = action;
+ args.ucLaneNum = lane_num;
+ args.ucStatus = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+ dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[25];
+ int ret;
+
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
+ if (ret) {
+ memcpy(dig_connector->dpcd, msg, 8);
+ {
+ int i;
+ DRM_DEBUG("DPCD: ");
+ for (i = 0; i < 8; i++)
+ DRM_DEBUG("%02x ", msg[i]);
+ DRM_DEBUG("\n");
+ }
+ return true;
+ }
+ dig_connector->dpcd[0] = 0;
+ return false;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ dig_connector->dp_clock =
+ dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
+ dig_connector->dp_lane_count =
+ dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
+}
+
+int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+ return dp_mode_valid(dig_connector->dpcd, mode->clock);
+}
+
+static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
+ DP_LINK_STATUS_SIZE, link_status);
+ if (!ret) {
+ DRM_ERROR("displayport link status failed\n");
+ return false;
+ }
+
+ DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+ return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
+ return false;
+ return true;
+}
+
+static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+ if (dig_connector->dpcd[0] >= 0x11) {
+ radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
+ &power_state);
+ }
+}
+
+static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
+ &downspread);
+}
+
+static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
+ u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
+ link_configuration);
+}
+
+static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
+ struct drm_encoder *encoder,
+ u8 train_set[4])
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int i;
+
+ for (i = 0; i < dig_connector->dp_lane_count; i++)
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+ i, train_set[i]);
+
+ radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
+ dig_connector->dp_lane_count, train_set);
+}
+
+static void dp_set_training(struct radeon_connector *radeon_connector,
+ u8 training)
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
+ 1, &training);
+}
+
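+/*
+ * Training sequence implemented below (summary added editorially):
+ * power up the sink, program link rate and lane count, run the
+ * clock-recovery loop with training pattern 1, then the
+ * channel-equalization loop with training pattern 2, and finally clear
+ * the training pattern and signal completion to the encoder.
+ */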
+void dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+ int enc_id = 0;
+ bool clock_recovery, channel_eq;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ u8 tries, voltage;
+ u8 train_set[4];
+ int i;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+ dig = radeon_encoder->enc_priv;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ if (ASIC_IS_DCE32(rdev)) {
+ if (dig->dig_block)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_LINK_A;
+ } else {
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
+ }
+
+ memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ if (dig_connector->dp_clock == 270000)
+ link_configuration[0] = DP_LINK_BW_2_7;
+ else
+ link_configuration[0] = DP_LINK_BW_1_62;
+ link_configuration[1] = dig_connector->dp_lane_count;
+ if (dig_connector->dpcd[0] >= 0x11)
+ link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ /* power up the sink */
+ dp_set_power(radeon_connector, DP_SET_POWER_D0);
+ /* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+ /* set link bw and lanes on the sink */
+ dp_set_link_bw_lanes(radeon_connector, link_configuration);
+ /* disable downspread on the sink */
+ dp_set_downspread(radeon_connector, 0);
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+
+ /* set initial vs/emph */
+ memset(train_set, 0, 4);
+ udelay(400);
+ /* set training pattern 1 on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
+
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+ /* clock recovery loop */
+ clock_recovery = false;
+ tries = 0;
+ voltage = 0xff;
+ for (;;) {
+ udelay(100);
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ break;
+
+ if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ for (i = 0; i < dig_connector->dp_lane_count; i++) {
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ }
+ if (i == dig_connector->dp_lane_count) {
+ DRM_ERROR("clock recovery reached max voltage\n");
+ break;
+ }
+
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5) {
+ DRM_ERROR("clock recovery tried 5 times\n");
+ break;
+ }
+ } else
+ tries = 0;
+
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+ }
+ if (!clock_recovery)
+ DRM_ERROR("clock recovery failed\n");
+ else
+ DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+ train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+
+ /* set training pattern 2 on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
+ /* set training pattern 2 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
+
+ /* channel equalization loop */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ udelay(400);
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ break;
+
+ if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5) {
+ DRM_ERROR("channel eq failed: 5 tries\n");
+ break;
+ }
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+ tries++;
+ }
+
+ if (!channel_eq)
+ DRM_ERROR("channel eq failed\n");
+ else
+ DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+ train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+ >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+ /* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
+}
+
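+/*
+ * I2C-over-AUX bridge used by the common DP i2c algorithm: one data
+ * byte per call, with the Middle-Of-Transaction bit set unless the
+ * caller asked for a STOP (editorial summary).
+ */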
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+ int ret = 0;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_len, dp_msg_len;
+ int reply_bytes;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[2] = AUX_I2C_READ << 4;
+ else
+ msg[2] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[2] |= AUX_I2C_MOT << 4;
+
+ msg[0] = address;
+ msg[1] = address >> 8;
+
+ reply_bytes = 1;
+
+ msg_len = 4;
+ dp_msg_len = 3;
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[4] = write_byte;
+ msg_len++;
+ dp_msg_len += 2;
+ break;
+ case MODE_I2C_READ:
+ dp_msg_len += 1;
+ break;
+ default:
+ break;
+ }
+
+ msg[3] = (dp_msg_len) << 4;
+ ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
+
+ if (ret) {
+ if (read_byte)
+ *read_byte = reply[0];
+ return reply_bytes;
+ }
+ return -EREMOTEIO;
+}
+
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93eabcf1..824cc6480a0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+/* hpd for digital panel detect/disconnect */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
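+/* set the interrupt polarity to the opposite of the current pin state,
+ * so the next connect or disconnect transition raises an interrupt
+ * (editorial comment) */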
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r100_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(RADEON_FP_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP_DETECT_INT_POL;
+ WREG32(RADEON_FP_GEN_CNTL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(RADEON_FP2_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP2_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP2_DETECT_INT_POL;
+ WREG32(RADEON_FP2_GEN_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+void r100_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ rdev->irq.hpd[1] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ r100_irq_set(rdev);
+}
+
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ rdev->irq.hpd[1] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/*
* PCI GART
*/
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev);
}
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
WREG32(RADEON_AIC_HI_ADDR, tmp);
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.crtc_vblank_int[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
+ if (rdev->irq.hpd[0]) {
+ tmp |= RADEON_FP_DETECT_MASK;
+ }
+ if (rdev->irq.hpd[1]) {
+ tmp |= RADEON_FP2_DETECT_MASK;
+ }
WREG32(RADEON_GEN_INT_CNTL, tmp);
return 0;
}
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
- uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
- RADEON_CRTC2_VBLANK_STAT;
+ uint32_t irq_mask = RADEON_SW_INT_TEST |
+ RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+ RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
int r100_irq_process(struct radeon_device *rdev)
{
uint32_t status, msi_rearm;
+ bool queue_hotplug = false;
status = r100_irq_ack(rdev);
if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
}
+ if (status & RADEON_FP_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (status & RADEON_FP2_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
status = r100_irq_ack(rdev);
}
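+ /* connector re-detection can sleep (DDC transfers), so defer
+ * hotplug handling to a workqueue */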
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r;
}
}
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
void r100_wb_fini(struct radeon_device *rdev)
{
+ int r;
+
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
- radeon_object_unref(&rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+ return;
+ }
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj)
+ struct radeon_bo *robj)
{
unsigned idx;
u32 value;
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
- if ((value + 1) > radeon_object_size(robj)) {
+ if ((value + 1) > radeon_bo_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n",
value + 1,
- radeon_object_size(robj));
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+ u32 tmp;
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
return 0;
}
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+ /* set these so they don't interfere with anything */
+ WREG32(RADEON_OV0_SCALE_CNTL, 0);
+ WREG32(RADEON_SUBPIC_CNTL, 0);
+ WREG32(RADEON_VIPH_CONTROL, 0);
+ WREG32(RADEON_I2C_CNTL_1, 0);
+ WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+ WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+ WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
/*
* VRAM info
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
- struct radeon_object *cube_robj;
+ struct radeon_bo *cube_robj;
unsigned long size;
for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
- if (size > radeon_object_size(cube_robj)) {
+ if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_object_size(cube_robj));
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h;
int ret;
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
- if (size > radeon_object_size(robj)) {
+ if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_object_size(robj));
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
- if (size > radeon_object_size(track->cb[i].robj)) {
+ if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
- radeon_object_size(track->cb[i].robj));
+ radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
- if (size > radeon_object_size(track->zb.robj)) {
+ if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
- radeon_object_size(track->zb.robj));
+ radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i,
- size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i, size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev)
r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
+ r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r100_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r100_startup(rdev);
}
@@ -3174,7 +3325,7 @@ void r100_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -3242,10 +3393,8 @@ int r100_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r100_errata(rdev);
/* Initialize clocks */
@@ -3264,7 +3413,7 @@ int r100_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a89..ca50903dd2b 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
* CS functions
*/
struct r100_cs_track_cb {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned pitch;
unsigned cpp;
unsigned offset;
};
struct r100_cs_track_array {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned esize;
};
struct r100_cs_cube_info {
- struct radeon_object *robj;
- unsigned offset;
+ struct radeon_bo *robj;
+ unsigned offset;
unsigned width;
unsigned height;
};
struct r100_cs_track_texture {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
unsigned pitch;
unsigned width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8e404..83378c39d0e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350 ||
+ rdev->family == CHIP_RV350)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r300_startup(rdev);
}
@@ -1265,7 +1280,7 @@ void r300_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1303,10 +1318,8 @@ int r300_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r300_errata(rdev);
/* Initialize clocks */
@@ -1325,7 +1338,7 @@ int r300_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1cefdbcc085..c05a7270cf0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_pipes_init(rdev);
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
}
/* Resume clock after posting */
r420_clock_resume(rdev);
-
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r420_startup(rdev);
}
@@ -258,7 +261,7 @@ void r420_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
if (rdev->is_atom_bios) {
radeon_atombios_fini(rdev);
} else {
@@ -301,14 +304,9 @@ int r420_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -331,10 +329,13 @@ int r420_init(struct radeon_device *rdev)
return r;
}
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r) {
return r;
}
+ if (rdev->family == CHIP_R420)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCIE) {
r = rv370_pcie_gart_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 7baa7395556..74ad89bdf2b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -716,6 +716,8 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
+#define AVIVO_DC_GPIO_HPD_A 0x7e94
+
#define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f7435185c0a..0f3843b6dac 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r520_startup(rdev);
}
@@ -254,6 +255,9 @@ int r520_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
if (!radeon_card_posted(rdev) && rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 278f646bc18..36656bd110b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r600_hpd_sense(rdev, hpd);
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ if (ASIC_IS_DCE3(rdev)) {
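+ /* connection/RX de-bounce timer values; 0x9c4/0xfa are assumed
+ * carried over from reference code (tick units are
+ * hardware-specific) */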
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+ if (ASIC_IS_DCE32(rdev))
+ tmp |= DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[2] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ r600_irq_set(rdev);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
/*
* R600 PCIE GART
*/
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -394,11 +677,11 @@ int r600_mc_init(struct radeon_device *rdev)
* AGP so that GPU can catch out of VRAM/AGP access
*/
if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enought place before */
+ /* Enough place before */
rdev->mc.vram_location = rdev->mc.gtt_location -
rdev->mc.mc_vram_size;
} else if (tmp > rdev->mc.mc_vram_size) {
- /* Enought place after */
+ /* Enough place after */
rdev->mc.vram_location = rdev->mc.gtt_location +
rdev->mc.gtt_size;
} else {
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
+void r600_hdp_flush(struct radeon_device *rdev)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
/*
* CP & Ring
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
-int r600_cp_init_microcode(struct radeon_device *rdev)
+int r600_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
- size_t pfp_req_size, me_req_size;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size;
char fw_name[30];
int err;
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
}
switch (rdev->family) {
- case CHIP_R600: chip_name = "R600"; break;
- case CHIP_RV610: chip_name = "RV610"; break;
- case CHIP_RV630: chip_name = "RV630"; break;
- case CHIP_RV620: chip_name = "RV620"; break;
- case CHIP_RV635: chip_name = "RV635"; break;
- case CHIP_RV670: chip_name = "RV670"; break;
+ case CHIP_R600:
+ chip_name = "R600";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV610:
+ chip_name = "RV610";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV630:
+ chip_name = "RV630";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV620:
+ chip_name = "RV620";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV635:
+ chip_name = "RV635";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV670:
+ chip_name = "RV670";
+ rlc_chip_name = "R600";
+ break;
case CHIP_RS780:
- case CHIP_RS880: chip_name = "RS780"; break;
- case CHIP_RV770: chip_name = "RV770"; break;
+ case CHIP_RS880:
+ chip_name = "RS780";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV770:
+ chip_name = "RV770";
+ rlc_chip_name = "R700";
+ break;
case CHIP_RV730:
- case CHIP_RV740: chip_name = "RV730"; break;
- case CHIP_RV710: chip_name = "RV710"; break;
+ case CHIP_RV740:
+ chip_name = "RV730";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_RV710:
+ chip_name = "RV710";
+ rlc_chip_name = "R700";
+ break;
default: BUG();
}
if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
+ rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
pfp_req_size = PFP_UCODE_SIZE * 4;
me_req_size = PM4_UCODE_SIZE * 12;
+ rlc_req_size = RLC_UCODE_SIZE * 4;
}
- DRM_INFO("Loading %s CP Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
out:
platform_device_unregister(pdev);
@@ -1187,6 +1519,8 @@ out:
rdev->pfp_fw = NULL;
release_firmware(rdev->me_fw);
rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
}
return err;
}
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_wb_disable(struct radeon_device *rdev)
{
+ int r;
+
WREG32(SCRATCH_UMSK, 0);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
}
}
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
{
r600_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_unref(&rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ r600_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
- dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ /* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
}
int r600_copy_dma(struct radeon_device *rdev,
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
-int r600_irq_process(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
-int r600_irq_set(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
@@ -1516,13 +1863,26 @@ int r600_startup(struct radeon_device *rdev)
}
r600_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
@@ -1583,13 +1943,19 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
@@ -1627,7 +1993,11 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -1650,31 +2020,31 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
r = r600_pcie_gart_init(rdev);
if (r)
return r;
- rdev->accel_working = true;
r = r600_blit_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled blitter (%d).\n", r);
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
return r;
}
+ rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
r600_suspend(rdev);
@@ -1686,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
@@ -1704,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev)
r600_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
@@ -1712,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1798,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
+ * much the same as the CP ring buffer, but in reverse: rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and the host consumes. As the host irq handler processes interrupts, it
+ * increments the rptr. When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
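+ *
+ * A minimal sketch of the consume loop (illustrative only; entries are
+ * 16 bytes and rptr/wptr are byte offsets, and handle_iv_entry() stands
+ * in for the src_id switch in r600_irq_process() below):
+ *
+ *   rptr = rdev->ih.rptr;
+ *   wptr = r600_get_ih_wptr(rdev);
+ *   while (rptr != wptr) {
+ *           handle_iv_entry(&rdev->ih.ring[rptr / 4]);
+ *           rptr = (rptr + 16) % rdev->ih.ring_size;
+ *   }
+ *   WREG32(IH_RB_RPTR, rptr);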
+ */
+
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ u32 rb_bufsz;
+
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 4);
+ ring_size = (1 << rb_bufsz) * 4;
+ rdev->ih.ring_size = ring_size;
+ rdev->ih.align_mask = 4 - 1;
+}
+
+static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+{
+ int r;
+
+ rdev->ih.ring_size = ring_size;
+ /* Allocate ring buffer */
+ if (rdev->ih.ring_obj == NULL) {
+ r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ih.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->ih.ring_obj,
+ (void **)&rdev->ih.ring);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+ return r;
+ }
+ }
+ rdev->ih.ptr_mask = (rdev->ih.ring_size / 4) - 1;
+ rdev->ih.rptr = 0;
+
+ return 0;
+}
+
+static void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+ int r;
+ if (rdev->ih.ring_obj) {
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ih.ring_obj);
+ radeon_bo_unpin(rdev->ih.ring_obj);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ }
+ radeon_bo_unref(&rdev->ih.ring_obj);
+ rdev->ih.ring = NULL;
+ rdev->ih.ring_obj = NULL;
+ }
+}
+
+static void r600_rlc_stop(struct radeon_device *rdev)
+{
+
+ if (rdev->family >= CHIP_RV770) {
+ /* r7xx asics need to soft reset RLC before halting */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+ RREG32(SRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+ }
+
+ WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+ u32 i;
+ const __be32 *fw_data;
+
+ if (!rdev->rlc_fw)
+ return -EINVAL;
+
+ r600_rlc_stop(rdev);
+
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_CNTL, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->family >= CHIP_RV770) {
+ for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else {
+ for (i = 0; i < RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ r600_rlc_start(rdev);
+
+ return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ rdev->ih.enabled = true;
+}
+
+static void r600_disable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ rdev->ih.enabled = false;
+ rdev->ih.wptr = 0;
+ rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(DxMODE_INT_MASK, 0);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ if (ASIC_IS_DCE32(rdev)) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ } else {
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ /* allocate ring */
+ ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ if (ret)
+ return ret;
+
+ /* disable irqs */
+ r600_disable_interrupts(rdev);
+
+ /* init rlc */
+ ret = r600_rlc_init(rdev);
+ if (ret) {
+ r600_ih_ring_fini(rdev);
+ return ret;
+ }
+
+ /* setup interrupt control */
+ /* set dummy read address to ring address */
+ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
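+ /* rb_bufsz is log2 of the ring size in dwords; (rb_bufsz << 1)
+ * places it in the IH_IB_SIZE field of IH_RB_CNTL below */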
+ ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1));
+ /* WPTR writeback, not yet */
+ /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
+ WREG32(IH_RB_WPTR_ADDR_LO, 0);
+ WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+ /* RPTR_REARM only works if msi's are enabled */
+ if (rdev->msi_enabled)
+ ih_cntl |= RPTR_REARM;
+
+#ifdef __BIG_ENDIAN
+ ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+#endif
+ WREG32(IH_CNTL, ih_cntl);
+
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
+
+ /* enable irqs */
+ r600_enable_interrupts(rdev);
+
+ return ret;
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_disable_interrupts(rdev);
+ r600_rlc_stop(rdev);
+ r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 mode_int = 0;
+ u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled)
+ return 0;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ if (ASIC_IS_DCE32(rdev)) {
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+ } else {
+ hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("r600_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("r600_irq_set: vblank 0\n");
+ mode_int |= D1MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("r600_irq_set: vblank 1\n");
+ mode_int |= D2MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("r600_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("r600_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("r600_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("r600_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("r600_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("r600_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DxMODE_INT_MASK, mode_int);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ }
+ } else {
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ }
+
+ return 0;
+}
+
+static inline void r600_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2)
+{
+ u32 tmp;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ } else {
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = 0;
+ }
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (ASIC_IS_DCE32(rdev)) {
+ if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ }
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_disable_interrupt_state(rdev);
+}
+
+static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+ if (wptr & RB_OVERFLOW) {
+ WARN_ON(1);
+ /* XXX deal with overflow */
+ DRM_ERROR("IH RB overflow\n");
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ wptr = wptr & WPTR_OFFSET_MASK;
+
+ return wptr;
+}
+
+/* r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [127:60] - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id src_data description
+ * 1 0 D1 Vblank
+ * 1 1 D1 Vline
+ * 5 0 D2 Vblank
+ * 5 1 D2 Vline
+ * 19 0 FP Hot plug detection A
+ * 19 1 FP Hot plug detection B
+ * 19 2 DAC A auto-detection
+ * 19 3 DAC B auto-detection
+ * 176 - CP_INT RB
+ * 177 - CP_INT IB1
+ * 178 - CP_INT IB2
+ * 181 - EOP Interrupt
+ * 233 - GUI Idle
+ *
+ * Note: these are based on r600 and may need to be
+ * adjusted or extended on newer asics
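+ *
+ * A sketch of decoding one entry (this mirrors what r600_irq_process()
+ * does below; each 128-bit entry spans four dwords and only the first
+ * two carry data):
+ *
+ *   ring_index = rptr / 4;
+ *   src_id   = rdev->ih.ring[ring_index] & 0xff;
+ *   src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;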
+ */
+
+int r600_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = r600_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 last_entry = rdev->ih.ring_size - 16;
+ u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int & LB_D2_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 19: /* HPD/DAC hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int & DC_HPD2_INTERRUPT) {
+ disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont & DC_HPD3_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont & DC_HPD4_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 10:
+ if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 12:
+ if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ if (rptr == last_entry)
+ rptr = 0;
+ else
+ rptr += 16;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
/*
* Debugfs info
@@ -1811,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t rdp, wdp;
unsigned count, i, j;
radeon_ring_free_size(rdev);
- rdp = RREG32(CP_RB_RPTR);
- wdp = RREG32(CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+ seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+ seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
+ i = rdev->cp.rptr;
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (i + 1) & rdev->cp.ptr_mask;
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index dbf716e1fbf..9aecafb51b6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_object_create(rdev, NULL, obj_size,
- true, RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->r600_blit.shader_obj);
+ r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size,
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
- r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
if (r) {
DRM_ERROR("failed to map blit object %d\n", r);
return r;
}
-
if (rdev->family >= CHIP_RV770)
memcpy_toio(ptr + rdev->r600_blit.state_offset,
r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
-
-
memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
- radeon_object_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
void r600_blit_fini(struct radeon_device *rdev)
{
- radeon_object_unpin(rdev->r600_blit.shader_obj);
- radeon_object_unref(&rdev->r600_blit.shader_obj);
+ int r;
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+ goto out_unref;
+ }
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 3; /* fence emit for VB IB */
+ ring_size += 5; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 3; /* fence emit for done copy */
+ ring_size += 5; /* fence emit for done copy */
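+ /* a fence emit is now 5 dwords: 3 for the SET_CONFIG_REG scratch
+ * write plus 2 for the PACKET0 interrupt write added in
+ * r600_fence_ring_emit() */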
r = radeon_ring_lock(rdev, ring_size);
WARN_ON(r);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 27ab428b149..05894edadab 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -456,7 +456,215 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
-
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define RLC_CNTL 0x3f00
+# define RLC_ENABLE (1 << 0)
+#define RLC_HB_BASE 0x3f10
+#define RLC_HB_CNTL 0x3f0c
+#define RLC_HB_RPTR 0x3f20
+#define RLC_HB_WPTR 0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR 0x3f14
+#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_MC_CNTL 0x3f44
+#define RLC_UCODE_CNTL 0x3f48
+#define RLC_UCODE_ADDR 0x3f2c
+#define RLC_UCODE_DATA 0x3f30
+
+#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_RLC (1 << 13)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
+#define D1MODE_VBLANK_STATUS 0x6534
+#define D2MODE_VBLANK_STATUS 0x6d34
+# define DxMODE_VBLANK_OCCURRED (1 << 0)
+# define DxMODE_VBLANK_ACK (1 << 4)
+# define DxMODE_VBLANK_STAT (1 << 12)
+# define DxMODE_VBLANK_INTERRUPT (1 << 16)
+# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
+#define D1MODE_VLINE_STATUS 0x653c
+#define D2MODE_VLINE_STATUS 0x6d3c
+# define DxMODE_VLINE_OCCURRED (1 << 0)
+# define DxMODE_VLINE_ACK (1 << 4)
+# define DxMODE_VLINE_STAT (1 << 12)
+# define DxMODE_VLINE_INTERRUPT (1 << 16)
+# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
+#define DxMODE_INT_MASK 0x6540
+# define D1MODE_VBLANK_INT_MASK (1 << 0)
+# define D1MODE_VLINE_INT_MASK (1 << 4)
+# define D2MODE_VBLANK_INT_MASK (1 << 8)
+# define D2MODE_VLINE_INT_MASK (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
+# define DC_HPD1_INTERRUPT (1 << 18)
+# define DC_HPD2_INTERRUPT (1 << 19)
+#define DISP_INTERRUPT_STATUS 0x7edc
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VLINE_INTERRUPT (1 << 3)
+# define LB_D1_VBLANK_INTERRUPT (1 << 4)
+# define LB_D2_VBLANK_INTERRUPT (1 << 5)
+# define DACA_AUTODETECT_INTERRUPT (1 << 16)
+# define DACB_AUTODETECT_INTERRUPT (1 << 17)
+# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
+# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
+# define DC_HPD4_INTERRUPT (1 << 14)
+# define DC_HPD4_RX_INTERRUPT (1 << 15)
+# define DC_HPD3_INTERRUPT (1 << 28)
+# define DC_HPD1_RX_INTERRUPT (1 << 29)
+# define DC_HPD2_RX_INTERRUPT (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
+# define DC_HPD3_RX_INTERRUPT (1 << 0)
+# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
+# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
+# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
+# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
+# define AUX1_SW_DONE_INTERRUPT (1 << 5)
+# define AUX1_LS_DONE_INTERRUPT (1 << 6)
+# define AUX2_SW_DONE_INTERRUPT (1 << 7)
+# define AUX2_LS_DONE_INTERRUPT (1 << 8)
+# define AUX3_SW_DONE_INTERRUPT (1 << 9)
+# define AUX3_LS_DONE_INTERRUPT (1 << 10)
+# define AUX4_SW_DONE_INTERRUPT (1 << 11)
+# define AUX4_LS_DONE_INTERRUPT (1 << 12)
+# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
+# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
+/* DCE 3.2 */
+# define AUX5_SW_DONE_INTERRUPT (1 << 15)
+# define AUX5_LS_DONE_INTERRUPT (1 << 16)
+# define AUX6_SW_DONE_INTERRUPT (1 << 17)
+# define AUX6_LS_DONE_INTERRUPT (1 << 18)
+# define DC_HPD5_INTERRUPT (1 << 19)
+# define DC_HPD5_RX_INTERRUPT (1 << 20)
+# define DC_HPD6_INTERRUPT (1 << 21)
+# define DC_HPD6_RX_INTERRUPT (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL 0x7828
+#define DACB_AUTO_DETECT_CONTROL 0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
+# define DACx_AUTODETECT_MODE(x) ((x) << 0)
+# define DACx_AUTODETECT_MODE_NONE 0
+# define DACx_AUTODETECT_MODE_CONNECT 1
+# define DACx_AUTODETECT_MODE_DISCONNECT 2
+# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
+#define DACA_AUTODETECT_INT_CONTROL 0x7838
+#define DACB_AUTODETECT_INT_CONTROL 0x7a38
+# define DACx_AUTODETECT_ACK (1 << 0)
+# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
+# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
+# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
+# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS 0x7d00
+#define DC_HPD2_INT_STATUS 0x7d0c
+#define DC_HPD3_INT_STATUS 0x7d18
+#define DC_HPD4_INT_STATUS 0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS 0x7dc0
+#define DC_HPD6_INT_STATUS 0x7df4
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
+# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
+# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
+# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL 0x7d04
+#define DC_HPD2_INT_CONTROL 0x7d10
+#define DC_HPD3_INT_CONTROL 0x7d1c
+#define DC_HPD4_INT_CONTROL 0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL 0x7dc4
+#define DC_HPD6_INT_CONTROL 0x7df8
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL 0x7d08
+#define DC_HPD2_CONTROL 0x7d14
+#define DC_HPD3_CONTROL 0x7d20
+#define DC_HPD4_CONTROL 0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL 0x7dc8
+#define DC_HPD6_CONTROL 0x7dfc
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+/* DCE 3.2 */
+# define DC_HPDx_EN (1 << 28)
/*
* PM4
@@ -500,7 +708,6 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
-#define PACKET3_CP_INTERRUPT 0x40
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
@@ -674,4 +881,5 @@
#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 224506a2f7b..c938bb54123 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
#ifndef __RADEON_H__
#define __RADEON_H__
-#include "radeon_object.h"
-
/* TODO: Here are things that needs to be done :
* - surface allocator & initializer : (bit like scratch reg) should
* initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
#include <linux/list.h>
#include <linux/kref.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
@@ -85,6 +88,7 @@ extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
+extern int radeon_new_pll;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -186,76 +190,62 @@ void radeon_fence_unref(struct radeon_fence **fence);
* Tiling registers
*/
struct radeon_surface_reg {
- struct radeon_object *robj;
+ struct radeon_bo *bo;
};
#define RADEON_GEM_MAX_SURFACES 8
/*
- * Radeon buffer.
+ * TTM.
*/
-struct radeon_object;
+struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+ bool mem_global_referenced;
+ struct ttm_bo_device bdev;
+};
+
+struct radeon_bo {
+ /* Protected by gem.mutex */
+ struct list_head list;
+ /* Protected by tbo.reserved */
+ u32 placements[3];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ void *kptr;
+ u32 tiling_flags;
+ u32 pitch;
+ int surface_reg;
+ /* Constant after initialization */
+ struct radeon_device *rdev;
+ struct drm_gem_object *gobj;
+};
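+
+/*
+ * Typical usage (a sketch based on the call sites in this patch): a
+ * radeon_bo must be reserved before it can be pinned, mapped, unmapped
+ * or unpinned, e.g.:
+ *
+ *   r = radeon_bo_reserve(bo, false);
+ *   if (likely(r == 0)) {
+ *           radeon_bo_kmap(bo, &ptr);
+ *           ...
+ *           radeon_bo_kunmap(bo);
+ *           radeon_bo_unreserve(bo);
+ *   }
+ */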
-struct radeon_object_list {
+struct radeon_bo_list {
struct list_head list;
- struct radeon_object *robj;
+ struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
- uint32_t tiling_flags;
+ u32 tiling_flags;
};
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
- struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
/*
* GEM objects.
*/
struct radeon_gem {
+ struct mutex mutex;
struct list_head objects;
};
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj);
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +261,7 @@ struct radeon_gart_table_ram {
};
struct radeon_gart_table_vram {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
volatile uint32_t *ptr;
};
@@ -352,11 +342,16 @@ struct radeon_irq {
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2];
+ /* FIXME: use defines for max hpd/dacs */
+ bool hpd[6];
+ spinlock_t sw_lock;
+ int sw_refcount;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
/*
* CP & ring.
@@ -376,7 +371,7 @@ struct radeon_ib {
*/
struct radeon_ib_pool {
struct mutex mutex;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
@@ -384,7 +379,7 @@ struct radeon_ib_pool {
};
struct radeon_cp {
- struct radeon_object *ring_obj;
+ struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
@@ -399,8 +394,25 @@ struct radeon_cp {
bool ready;
};
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+ struct radeon_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ uint64_t gpu_addr;
+ uint32_t align_mask;
+ uint32_t ptr_mask;
+ spinlock_t lock;
+ bool enabled;
+};
+
struct r600_blit {
- struct radeon_object *shader_obj;
+ struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
@@ -430,8 +442,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- struct radeon_object_list lobj;
+ struct radeon_bo *robj;
+ struct radeon_bo_list lobj;
uint32_t handle;
uint32_t flags;
};
@@ -527,7 +539,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
* Writeback
*/
struct radeon_wb {
- struct radeon_object *wb_obj;
+ struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
};
@@ -639,6 +651,11 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
+ void (*hdp_flush)(struct radeon_device *rdev);
+ void (*hpd_init)(struct radeon_device *rdev);
+ void (*hpd_fini)(struct radeon_device *rdev);
+ bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
};
/*
@@ -751,9 +768,9 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
- struct radeon_object *stollen_vga_memory;
+ struct radeon_bo *stollen_vga_memory;
struct fb_info *fbdev_info;
- struct radeon_object *fbdev_robj;
+ struct radeon_bo *fbdev_rbo;
struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
@@ -791,8 +808,12 @@ struct radeon_device {
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
+ const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
int msi_enabled; /* msi enabled */
+ struct r600_ih ih; /* r6/700 interrupt ring */
+ struct workqueue_struct *wq;
+ struct work_struct hotplug_work;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +850,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
}
}
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
/*
* Registers read & write functions.
@@ -965,18 +990,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -984,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1021,7 +1053,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj);
+ struct radeon_bo *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
@@ -1029,6 +1061,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1138,14 @@ extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c18fbee387d..636116bedcb 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
static struct radeon_asic r100_asic = {
.init = &r100_init,
@@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
/*
@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
.fini = &rs600_fini,
@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
/*
@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
};
/*
@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2ed88a82093..12a0c760e7f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb, uint32_t igp_lane_info,
- uint16_t connector_object_id);
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -60,16 +61,16 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
- *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+ uint8_t id)
{
- struct radeon_device *rdev = dev->dev_private;
struct atom_context *ctx = rdev->mode_info.atom_context;
- ATOM_GPIO_I2C_ASSIGMENT gpio;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset;
+ int i;
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
@@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
- gpio = i2c_info->asGPIO_Info[id];
-
- i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
- i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
- i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
- i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
- i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
- i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
- i2c.put_data_mask = (1 << gpio.ucDataEnShift);
- i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
- i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
- i2c.valid = true;
+
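+ /* GPIO_I2C_Info entries are keyed by i2c id (sucI2cId.ucAccess), not by
+ * table position, so scan the whole table for a match instead of
+ * indexing directly with the id */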
+ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ if (gpio->sucI2cId.ucAccess == id) {
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ i2c.valid = true;
+ }
+ }
return i2c;
}
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ struct radeon_gpio_rec gpio;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+ struct _ATOM_GPIO_PIN_LUT *gpio_info;
+ ATOM_GPIO_PIN_ASSIGNMENT *pin;
+ u16 data_offset, size;
+ int i, num_indices;
+
+ memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+ gpio.valid = false;
+
+ atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+
+ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
+ if (id == pin->ucGPIO_ID) {
+ gpio.id = pin->ucGPIO_ID;
+ gpio.reg = pin->usGpioPin_AIndex * 4;
+ gpio.mask = (1 << pin->ucGpioPinBitShift);
+ gpio.valid = true;
+ break;
+ }
+ }
+
+ return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+ struct radeon_gpio_rec *gpio)
+{
+ struct radeon_hpd hpd;
+ hpd.gpio = *gpio;
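+ /* on avivo parts all six hpd pins share the DC_GPIO_HPD_A register;
+ * the bit position of the pin's gpio mask identifies the hpd line */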
+ if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+ switch(gpio->mask) {
+ case (1 << 0):
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ case (1 << 8):
+ hpd.hpd = RADEON_HPD_2;
+ break;
+ case (1 << 16):
+ hpd.hpd = RADEON_HPD_3;
+ break;
+ case (1 << 24):
+ hpd.hpd = RADEON_HPD_4;
+ break;
+ case (1 << 26):
+ hpd.hpd = RADEON_HPD_5;
+ break;
+ case (1 << 28):
+ hpd.hpd = RADEON_HPD_6;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else
+ hpd.hpd = RADEON_HPD_NONE;
+ return hpd;
+}
+
static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint32_t supported_device,
int *connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t *line_mux)
+ uint16_t *line_mux,
+ struct radeon_hpd *hpd)
{
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* HIS X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7146) &&
+ (dev->pdev->subsystem_vendor == 0x17af) &&
+ (dev->pdev->subsystem_device == 0x2058)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+ /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7142) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x2134)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Acer laptop reports DVI-D as DVI-I */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
return true;
}
@@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_mode_info *mode_info = &rdev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
- uint16_t size, data_offset;
- uint8_t frev, crev, line_mux = 0;
+ u16 size, data_offset;
+ u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int connector_type;
- uint16_t igp_lane_info, conn_id, connector_object_id;
+ u16 igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_gpio_rec gpio;
+ struct radeon_hpd hpd;
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
linkb = false;
-
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
}
- /* look up gpio for ddc */
+ /* look up gpio for ddc, hpd */
if ((le16_to_cpu(path->usDeviceTag) &
- (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- == 0) {
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
if (le16_to_cpu(path->usConnObjectId) ==
le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
asObjects[j].
usRecordOffset));
ATOM_I2C_RECORD *i2c_record;
+ ATOM_HPD_INT_RECORD *hpd_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+ hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
ucRecordType <=
ATOM_MAX_OBJECT_RECORD_NUMBER) {
- switch (record->
- ucRecordType) {
+ switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record =
- (ATOM_I2C_RECORD
- *) record;
- line_mux =
- i2c_record->
- sucI2cId.
- bfI2C_LineMux;
+ (ATOM_I2C_RECORD *)
+ record;
+ i2c_config =
+ (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ ddc_bus = radeon_lookup_i2c_gpio(rdev,
+ i2c_config->
+ ucAccess);
+ break;
+ case ATOM_HPD_INT_RECORD_TYPE:
+ hpd_record =
+ (ATOM_HPD_INT_RECORD *)
+ record;
+ gpio = radeon_lookup_gpio(rdev,
+ hpd_record->ucHPDIntGPIOID);
+ hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ hpd.plugged_state = hpd_record->ucPlugged_PinState;
break;
}
record =
@@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else
- line_mux = 0;
-
- if ((le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_TV1_SUPPORT)
- || (le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_TV2_SUPPORT)
- || (le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_CV_SUPPORT))
+ } else {
+ hpd.hpd = RADEON_HPD_NONE;
ddc_bus.valid = false;
- else
- ddc_bus = radeon_lookup_gpio(dev, line_mux);
+ }
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
(dev, le16_to_cpu(path->usDeviceTag), &connector_type,
- &ddc_bus, &conn_id))
+ &ddc_bus, &conn_id, &hpd))
continue;
radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
usDeviceTag),
connector_type, &ddc_bus,
linkb, igp_lane_info,
- connector_object_id);
+ connector_object_id,
+ &hpd);
}
}
@@ -502,6 +622,7 @@ struct bios_connector {
uint16_t devices;
int connector_type;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_hpd hpd;
};
bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint16_t device_support;
uint8_t dac;
union atom_supported_devices *supported_devices;
- int i, j;
+ int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ if (frev > 1)
+ max_device = ATOM_MAX_SUPPORTED_DEVICE;
+ else
+ max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+ for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
@@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
- if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
- if ((i == ATOM_DEVICE_DFP2_INDEX)
- && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
- else if ((i == ATOM_DEVICE_DFP3_INDEX)
- && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
- else
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux;
- } else
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.ucAccess;
/* give tv unique connector ids */
if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].line_mux = 52;
} else
bios_connectors[i].ddc_bus =
- radeon_lookup_gpio(dev,
- bios_connectors[i].line_mux);
+ radeon_lookup_i2c_gpio(rdev,
+ bios_connectors[i].line_mux);
+
+ if ((crev > 1) && (frev > 1)) {
+ u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+ switch (isb) {
+ case 0x4:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ break;
+ case 0xa:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ break;
+ default:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else {
+ if (i == ATOM_DEVICE_DFP1_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ else if (i == ATOM_DEVICE_DFP2_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ else
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ }
/* Always set the connector type to VGA for CRT1/CRT2. if they are
* shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (!radeon_atom_apply_quirks
(dev, (1 << i), &bios_connectors[i].connector_type,
- &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+ &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+ &bios_connectors[i].hpd))
continue;
bios_connectors[i].valid = true;
@@ -617,9 +752,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* combine shared connectors */
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
- for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+ for (j = 0; j < max_device; j++) {
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
@@ -643,6 +778,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].
connector_type =
DRM_MODE_CONNECTOR_DVII;
+ if (bios_connectors[j].devices &
+ (ATOM_DEVICE_DFP_SUPPORT))
+ bios_connectors[i].hpd =
+ bios_connectors[j].hpd;
bios_connectors[j].
valid = false;
}
@@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* add the connectors */
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
uint16_t connector_object_id =
atombios_get_connector_object_id(dev,
@@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
connector_type,
&bios_connectors[i].ddc_bus,
false, 0,
- connector_object_id);
+ connector_object_id,
+ &bios_connectors[i].hpd);
}
}
@@ -731,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
- p1pll->pll_out_min = 64800;
+ if (!radeon_new_pll)
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -861,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
struct radeon_atom_ss *ss = NULL;
+ int i;
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
@@ -878,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (!ss)
return NULL;
- ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
- ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
- ss->step = ss_info->asSS_Info[id].ucSS_Step;
- ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
- ss->range = ss_info->asSS_Info[id].ucSS_Range;
- ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+ for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+ if (ss_info->asSS_Info[i].ucSS_Id == id) {
+ ss->percentage =
+ le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[i].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[i].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ }
+ }
}
return ss;
}
@@ -901,7 +1048,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
- uint16_t data_offset;
+ uint16_t data_offset, misc;
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1087,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+ misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a7db8..4ddfd4b5bc5 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
unsigned sdomain, unsigned ddomain)
{
- struct radeon_object *dobj = NULL;
- struct radeon_object *sobj = NULL;
+ struct radeon_bo *dobj = NULL;
+ struct radeon_bo *sobj = NULL;
struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+ r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(sobj, sdomain, &saddr);
+ r = radeon_bo_reserve(sobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(sobj, sdomain, &saddr);
+ radeon_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+ r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(dobj, ddomain, &daddr);
+ r = radeon_bo_reserve(dobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(dobj, ddomain, &daddr);
+ radeon_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
}
out_cleanup:
if (sobj) {
- radeon_object_unpin(sobj);
- radeon_object_unref(&sobj);
+ r = radeon_bo_reserve(sobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(sobj);
+ radeon_bo_unreserve(sobj);
+ }
+ radeon_bo_unref(&sobj);
}
if (dobj) {
- radeon_object_unpin(dobj);
- radeon_object_unref(&dobj);
+ r = radeon_bo_reserve(dobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(dobj);
+ radeon_bo_unreserve(dobj);
+ }
+ radeon_bo_unref(&dobj);
}
if (fence) {
radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a8135416762..b062109efbe 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
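+ /* an unprogrammed pll reads back ref_div == 0; bail out rather
+ * than divide by zero */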
+ if (ref_div == 0)
+ return 0;
+
sclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
mclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_combios_get_clock_info(dev);
if (ret) {
- if (p1pll->reference_div < 2)
- p1pll->reference_div = 12;
+ if (p1pll->reference_div < 2) {
+ if (!ASIC_IS_AVIVO(rdev)) {
+ u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+ if (ASIC_IS_R300(rdev))
+ p1pll->reference_div =
+ (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+ else
+ p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ } else
+ p1pll->reference_div = 12;
+ }
if (p2pll->reference_div < 2)
p2pll->reference_div = 12;
if (rdev->family < CHIP_RS600) {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5253cbf6db1..c5021a3445d 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id);
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -442,38 +443,70 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
-struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+ int ddc_line)
{
struct radeon_i2c_bus_rec i2c;
- i2c.mask_clk_mask = RADEON_GPIO_EN_1;
- i2c.mask_data_mask = RADEON_GPIO_EN_0;
- i2c.a_clk_mask = RADEON_GPIO_A_1;
- i2c.a_data_mask = RADEON_GPIO_A_0;
- i2c.put_clk_mask = RADEON_GPIO_EN_1;
- i2c.put_data_mask = RADEON_GPIO_EN_0;
- i2c.get_clk_mask = RADEON_GPIO_Y_1;
- i2c.get_data_mask = RADEON_GPIO_Y_0;
- if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
- (ddc_line == RADEON_MDGPIO_EN_REG)) {
- i2c.mask_clk_reg = ddc_line;
- i2c.mask_data_reg = ddc_line;
- i2c.a_clk_reg = ddc_line;
- i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line + 4;
- i2c.get_data_reg = ddc_line + 4;
+ if (ddc_line == RADEON_GPIOPAD_MASK) {
+ i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c.a_data_reg = RADEON_GPIOPAD_A;
+ i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c.y_data_reg = RADEON_GPIOPAD_Y;
+ } else if (ddc_line == RADEON_MDGPIO_MASK) {
+ i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+ i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+ i2c.a_clk_reg = RADEON_MDGPIO_A;
+ i2c.a_data_reg = RADEON_MDGPIO_A;
+ i2c.en_clk_reg = RADEON_MDGPIO_EN;
+ i2c.en_data_reg = RADEON_MDGPIO_EN;
+ i2c.y_clk_reg = RADEON_MDGPIO_Y;
+ i2c.y_data_reg = RADEON_MDGPIO_Y;
} else {
+ i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+ i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
+
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line;
- i2c.get_data_reg = ddc_line;
+ i2c.en_clk_reg = ddc_line;
+ i2c.en_data_reg = ddc_line;
+ i2c.y_clk_reg = ddc_line;
+ i2c.y_data_reg = ddc_line;
+ }
+
+ if (rdev->family < CHIP_R200)
+ i2c.hw_capable = false;
+ else {
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ case RADEON_GPIO_MONID:
+ /* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+ * reliably on some pre-r4xx hardware; not sure why.
+ */
+ i2c.hw_capable = false;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
}
+ i2c.mm_i2c = false;
+ i2c.i2c_id = 0;
if (ddc_line)
i2c.valid = true;
@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
uint16_t sclk, mclk;
if (rdev->bios == NULL)
- return NULL;
+ return false;
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
@@ -993,8 +1026,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */
};
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1028,7 +1061,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
-
ver = RBIOS8(tmds_info);
DRM_INFO("DFP table revision: %d\n", ver);
if (ver == 3) {
@@ -1063,51 +1095,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds->tmds_pll[i].value);
}
}
- } else
+ } else {
DRM_INFO("No TMDS info found in BIOS\n");
+ return false;
+ }
return true;
}
-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
- struct radeon_encoder_int_tmds *tmds = NULL;
- bool ret;
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec i2c_bus;
- tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+ /* default for macs */
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- if (!tmds)
- return NULL;
-
- ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
- radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+ /* XXX some macs have duallink chips */
+ switch (rdev->mode_info.connector_table) {
+ case CT_POWERBOOK_EXTERNAL:
+ case CT_MINI_EXTERNAL:
+ default:
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
- return tmds;
+ return true;
}
-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- uint16_t ext_tmds_info;
- uint8_t ver;
+ uint16_t offset;
+ uint8_t ver, id, blocks, clk, data;
+ int i;
+ enum radeon_combios_ddc gpio;
+ struct radeon_i2c_bus_rec i2c_bus;
if (rdev->bios == NULL)
- return;
+ return false;
- ext_tmds_info =
- combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
- if (ext_tmds_info) {
- ver = RBIOS8(ext_tmds_info);
- DRM_INFO("External TMDS Table revision: %d\n", ver);
- // TODO
+ tmds->i2c_bus = NULL;
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("GPIO Table revision: %d\n", ver);
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ i2c_bus.valid = true;
+ i2c_bus.mask_clk_mask = (1 << clk);
+ i2c_bus.mask_data_mask = (1 << data);
+ i2c_bus.a_clk_mask = (1 << clk);
+ i2c_bus.a_data_mask = (1 << data);
+ i2c_bus.en_clk_mask = (1 << clk);
+ i2c_bus.en_data_mask = (1 << data);
+ i2c_bus.y_clk_mask = (1 << clk);
+ i2c_bus.y_data_mask = (1 << data);
+ i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
+ i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("External TMDS Table revision: %d\n", ver);
+ tmds->slave_addr = RBIOS8(offset + 4 + 2);
+ tmds->slave_addr >>= 1; /* 7 bit addressing */
+ gpio = RBIOS8(offset + 4 + 3);
+ switch (gpio) {
+ case DDC_MONID:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_DVI:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_VGA:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_CRT2:
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if (rdev->family >= CHIP_R300)
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ else
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_LCD: /* MM i2c */
+ DRM_ERROR("MM i2c requires hw i2c engine\n");
+ break;
+ default:
+ DRM_ERROR("Unsupported gpio %d\n", gpio);
+ break;
+ }
+ }
}
+
+ if (!tmds->i2c_bus) {
+ DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+ return false;
+ }
+
+ return true;
}
bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
rdev->mode_info.connector_table = radeon_connector_table;
if (rdev->mode_info.connector_table == CT_NONE) {
@@ -1168,7 +1288,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
/* these are the most common settings */
if (rdev->flags & RADEON_SINGLE_CRTC) {
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1178,10 +1299,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
+ ddc_i2c = combios_setup_i2c_bus(rdev, 0);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1191,10 +1314,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1204,10 +1329,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else {
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1223,10 +1350,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1236,11 +1365,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
}
if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
/* TV - tv dac */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1250,14 +1382,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
}
break;
case CT_IBOOK:
DRM_INFO("Connector Table: %d (ibook)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1265,9 +1399,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - TV DAC */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1275,8 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1285,13 +1424,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_EXTERNAL:
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1299,9 +1440,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* DVI-I - primary dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
@@ -1317,8 +1460,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1327,13 +1473,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_INTERNAL:
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1341,9 +1489,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* DVI-I - primary dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1358,8 +1508,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1368,13 +1521,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_VGA:
DRM_INFO("Connector Table: %d (powerbook vga)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1382,9 +1537,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1392,8 +1549,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1402,13 +1562,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_MINI_EXTERNAL:
DRM_INFO("Connector Table: %d (mini external tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
@@ -1424,8 +1586,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1434,13 +1599,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_MINI_INTERNAL:
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1455,8 +1622,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1465,13 +1635,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_IMAC_G5_ISIGHT:
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
rdev->mode_info.connector_table);
/* DVI-D - int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1479,9 +1651,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1489,8 +1663,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1499,13 +1676,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_EMAC:
DRM_INFO("Connector Table: %d (emac)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1513,9 +1692,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1523,8 +1704,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1533,7 +1717,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
@@ -1550,7 +1735,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
int bios_index,
enum radeon_combios_connector
*legacy_connector,
- struct radeon_i2c_bus_rec *ddc_i2c)
+ struct radeon_i2c_bus_rec *ddc_i2c,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1558,29 +1744,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
else if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
- ddc_i2c->valid = true;
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
ddc_i2c->mask_clk_mask = (0x20 << 8);
ddc_i2c->mask_data_mask = 0x80;
ddc_i2c->a_clk_mask = (0x20 << 8);
ddc_i2c->a_data_mask = 0x80;
- ddc_i2c->put_clk_mask = (0x20 << 8);
- ddc_i2c->put_data_mask = 0x80;
- ddc_i2c->get_clk_mask = (0x20 << 8);
- ddc_i2c->get_data_mask = 0x80;
- ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
- ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
- ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
- ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
- ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
- ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+ ddc_i2c->en_clk_mask = (0x20 << 8);
+ ddc_i2c->en_data_mask = 0x80;
+ ddc_i2c->y_clk_mask = (0x20 << 8);
+ ddc_i2c->y_data_mask = 0x80;
}
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if ((rdev->family >= CHIP_R300) &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
if (dev->pdev->device == 0x515e &&
@@ -1624,6 +1807,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
dev->pdev->subsystem_device == 0x280a)
return false;
+ /* MSI S270 has non-existent TV port */
+ if (dev->pdev->device == 0x5955 &&
+ dev->pdev->subsystem_vendor == 0x1462 &&
+ dev->pdev->subsystem_device == 0x0131)
+ return false;
+
return true;
}
@@ -1671,6 +1860,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
enum radeon_combios_connector connector;
int i = 0;
struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
if (rdev->bios == NULL)
return false;
@@ -1691,26 +1881,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
switch (ddc_type) {
case DDC_MONID:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
break;
}
+ switch (connector) {
+ case CONNECTOR_PROPRIETARY_LEGACY:
+ case CONNECTOR_DVI_I_LEGACY:
+ case CONNECTOR_DVI_D_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ hpd.hpd = RADEON_HPD_2;
+ else
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+
if (!radeon_apply_legacy_quirks(dev, i, &connector,
- &ddc_i2c))
+ &ddc_i2c, &hpd))
continue;
switch (connector) {
@@ -1727,7 +1931,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
break;
case CONNECTOR_CRT_LEGACY:
if (tmp & 0x1) {
@@ -1753,7 +1958,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
break;
case CONNECTOR_DVI_I_LEGACY:
devices = 0;
@@ -1799,7 +2005,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- connector_object_id);
+ connector_object_id,
+ &hpd);
break;
case CONNECTOR_DVI_D_LEGACY:
if ((tmp >> 4) & 0x1) {
@@ -1817,7 +2024,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- connector_object_id);
+ connector_object_id,
+ &hpd);
break;
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
@@ -1832,7 +2040,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_ERROR("Unknown connector type: %d\n",
@@ -1858,14 +2067,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
0),
ATOM_DEVICE_DFP1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
} else {
uint16_t crt_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
@@ -1876,13 +2087,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else {
DRM_DEBUG("No connector info found\n");
return false;
@@ -1910,27 +2123,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
case DDC_MONID:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_MONID);
+ (rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_DVI_DDC);
+ (rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_VGA_DDC);
+ (rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_CRT2_DDC);
+ (rdev, RADEON_GPIO_CRT2_DDC);
break;
case DDC_LCD:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_LCD_GPIO_MASK);
+ (rdev, RADEON_GPIOPAD_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
@@ -1939,19 +2152,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
case DDC_GPIO:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_MDGPIO_EN_REG);
+ (rdev, RADEON_MDGPIO_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
@@ -1960,13 +2173,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
default:
@@ -1977,12 +2190,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else
ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
5,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
}
}
@@ -1993,6 +2208,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev,
@@ -2003,7 +2219,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
}
}
}
@@ -2014,6 +2231,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (!tmds)
+ return;
+
+ switch (tmds->dvo_chip) {
+ case DVO_SIL164:
+ /* sil 164 */
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x09, 0x00);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x3b);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ case DVO_SIL1178:
+ /* sil 1178 - untested */
+ /*
+ * 0x0f, 0x44
+ * 0x0f, 0x4c
+ * 0x0e, 0x01
+ * 0x0a, 0x80
+ * 0x09, 0x30
+ * 0x0c, 0xc9
+ * 0x0d, 0x70
+ * 0x08, 0x32
+ * 0x08, 0x33
+ */
+ break;
+ default:
+ break;
+ }
+
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint16_t offset;
+ uint8_t blocks, slave_addr, rev;
+ uint32_t index, id;
+ uint32_t reg, val, and_mask, or_mask;
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (rdev->bios == NULL)
+ return false;
+
+ if (!tmds)
+ return false;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev > 1) {
+ blocks = RBIOS8(offset + 3);
+ index = offset + 4;
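+ /* the table is a stream of blocks: a 16-bit id whose top three
+ * bits select the op (0 = reg write, 2 = reg read-modify-write,
+ * 3 = delay in us, 4 = delay in ms, 6 = i2c register write) */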
+ while (blocks > 0) {
+ id = RBIOS16(index);
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 3:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val * 1000);
+ break;
+ case 6:
+ slave_addr = id & 0xff;
+ slave_addr >>= 1; /* 7 bit addressing */
+ index++;
+ reg = RBIOS8(index);
+ index++;
+ val = RBIOS8(index);
+ index++;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ blocks--;
+ }
+ return true;
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ index = offset + 10;
+ id = RBIOS16(index);
+ while (id != 0xffff) {
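+ /* same block scheme as the igp path, but terminated by an id of
+ * 0xffff; op 5 is a pll read-modify-write and delays are op 4 */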
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 5:
+ reg = id & 0x1fff;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32_PLL(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32_PLL(reg, val);
+ break;
+ case 6:
+ reg = id & 0x1fff;
+ val = RBIOS8(index);
+ index += 1;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ id = RBIOS16(index);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
{
struct radeon_device *rdev = dev->dev_private;
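
[Editor's note: radeon_combios_external_tmds_setup() above walks a small COMBIOS command script: each record starts with a 16-bit id whose top three bits select an opcode (register write, read-modify-write, delay, i2c write) and whose low 13 bits carry a register index. A minimal userspace sketch of that interpreter, assuming the non-IGP opcode numbering and the 0xffff terminator; bios[], mmio[] and the helpers are hypothetical stand-ins for the BIOS image and RBIOS*/WREG32, and mmio[] is indexed by dword where the driver multiplies by 4 for a byte address.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t bios[64];
static uint32_t mmio[0x2000];

static uint16_t rbios16(int o) { return bios[o] | (bios[o + 1] << 8); }
static uint32_t rbios32(int o) { return rbios16(o) | ((uint32_t)rbios16(o + 2) << 16); }

static void run_script(int index)
{
	uint16_t id = rbios16(index);

	while (id != 0xffff) {
		index += 2;
		switch (id >> 13) {
		case 0:	/* 32-bit register write */
			mmio[id & 0x1fff] = rbios32(index);
			index += 4;
			break;
		case 2: { /* read-modify-write */
			uint32_t and_mask = rbios32(index);
			uint32_t or_mask = rbios32(index + 4);

			index += 8;
			mmio[id & 0x1fff] = (mmio[id & 0x1fff] & and_mask) | or_mask;
			break;
		}
		case 4:	/* delay (elided in this sketch) */
			index += 2;
			break;
		default:
			printf("unknown opcode %d\n", id >> 13);
			return;
		}
		id = rbios16(index);
	}
}

int main(void)
{
	/* write 0xdeadbeef to dword register 3, then terminate */
	uint8_t script[] = { 0x03, 0x00, 0xef, 0xbe, 0xad, 0xde, 0xff, 0xff };

	memcpy(bios, script, sizeof(script));
	run_script(0);
	printf("reg 3 = 0x%08x\n", mmio[3]);
	return 0;
}
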
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 29763ceae3a..5eece186e03 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_needs_link_train(radeon_connector)) {
+ if (connector->encoder)
+ dp_link_train(connector->encoder, connector);
+ }
+ }
+ }
+
+}
+
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
@@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@@ -553,17 +573,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +728,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
ret = connector_status_disconnected;
} else
ret = connector_status_connected;
+
+ /* Multiple connectors on the same encoder with the same DDC line:
+ * this tends to be HDMI and DVI sharing one DDC line on the same
+ * encoder. If the EDID reports an HDMI sink, consider the HDMI port
+ * connected and the DVI port disconnected; if the EDID does not
+ * report HDMI, do the reverse.
+ */
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *list_connector;
+ struct radeon_connector *list_radeon_connector;
+ list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+ if (connector == list_connector)
+ continue;
+ list_radeon_connector = to_radeon_connector(list_connector);
+ if (radeon_connector->devices == list_radeon_connector->devices) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ } else {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ }
}
}
@@ -863,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
.force = radeon_dvi_force,
};
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ if (radeon_connector->ddc_bus)
+ radeon_i2c_destroy(radeon_connector->ddc_bus);
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
+ if (radeon_dig_connector->dp_i2c_bus)
+ radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ kfree(radeon_connector->con_priv);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+ return ret;
+}
+
+static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ u8 sink_type;
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+
+ sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
+ }
+ } else {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_ddc_probe(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
+ }
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
+
+ return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ /* XXX check mode bandwidth */
+
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return radeon_dp_mode_valid_helper(radeon_connector, mode);
+ else
+ return MODE_OK;
+}
+
+struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+ .get_modes = radeon_dp_get_modes,
+ .mode_valid = radeon_dp_mode_valid,
+ .best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_connector_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@@ -871,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
uint32_t igp_lane_info,
- uint16_t connector_object_id)
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -911,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1139,23 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (ret)
goto failed;
if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (!radeon_dig_connector->dp_i2c_bus)
+ goto failed;
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.coherent_mode_property,
+ 1);
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1169,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ 1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
goto failed;
}
- drm_mode_create_scaling_mode_property(dev);
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1214,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id)
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -1093,6 +1245,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1313,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ 1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
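
[Editor's note: a standalone sketch of the shared-DDC tie-break added to radeon_dvi_detect() above. Two ports share one DDC line, so an EDID probe succeeds on both; only the port whose type matches what the EDID advertises stays connected. The edid_is_hdmi flag stands in for drm_detect_hdmi_monitor().]

#include <stdbool.h>
#include <stdio.h>

enum port_type { PORT_DVID, PORT_HDMIA };
enum status { DISCONNECTED, CONNECTED };

static enum status detect(enum port_type port, bool shares_ddc, bool edid_is_hdmi)
{
	enum status ret = CONNECTED;	/* the EDID probe succeeded */

	if (!shares_ddc)
		return ret;
	if (edid_is_hdmi && port == PORT_DVID)
		ret = DISCONNECTED;	/* HDMI sink: the DVI twin loses */
	else if (!edid_is_hdmi && port == PORT_HDMIA)
		ret = DISCONNECTED;	/* DVI sink: the HDMI twin loses */
	return ret;
}

int main(void)
{
	printf("HDMI sink: DVI=%d HDMI=%d\n",
	       detect(PORT_DVID, true, true), detect(PORT_HDMIA, true, true));
	printf("DVI sink:  DVI=%d HDMI=%d\n",
	       detect(PORT_DVID, true, false), detect(PORT_HDMIA, true, false));
	return 0;
}
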
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 4f7afc79dd8..0b2f9c2ad2c 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = GET_SCRATCH(dev_priv, 1);
DRM_DEBUG("done_age = %d\n", done_age);
- for (i = start; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[start];
buf_priv = buf->dev_private;
if (buf->file_priv == NULL || (buf->pending &&
buf_priv->age <=
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
buf->pending = 0;
return buf;
}
- start = 0;
+ if (++start >= dma->buf_count)
+ start = 0;
}
if (t) {
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
}
}
- DRM_DEBUG("returning NULL!\n");
return NULL;
}
-#if 0
-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_buf_priv_t *buf_priv;
- struct drm_buf *buf;
- int i, t;
- int start;
- u32 done_age;
-
- done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
- if (++dev_priv->last_buf >= dma->buf_count)
- dev_priv->last_buf = 0;
-
- start = dev_priv->last_buf;
- dev_priv->stats.freelist_loops++;
-
- for (t = 0; t < 2; t++) {
- for (i = start; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
- if (buf->file_priv == 0 || (buf->pending &&
- buf_priv->age <=
- done_age)) {
- dev_priv->stats.requested_bufs++;
- buf->pending = 0;
- return buf;
- }
- }
- start = 0;
- }
-
- return NULL;
-}
-#endif
-
void radeon_freelist_reset(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
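
[Editor's note: the radeon_freelist_get() hunk above fixes a scan that ran from `start` to the end of the buffer list and so could skip entries below `start` within one timeout pass. A minimal sketch of the corrected round-robin walk, with available[] standing in for the per-buffer age/pending test.]

#include <stdbool.h>
#include <stdio.h>

static int freelist_get(const bool *available, int buf_count, int *last_buf)
{
	int start, i;

	if (++(*last_buf) >= buf_count)
		*last_buf = 0;
	start = *last_buf;

	for (i = 0; i < buf_count; i++) {	/* visit every buffer exactly once */
		if (available[start])
			return start;
		if (++start >= buf_count)	/* wrap instead of falling off the end */
			start = 0;
	}
	return -1;
}

int main(void)
{
	bool available[4] = { true, false, false, false };
	int last_buf = 2;	/* scan starts at index 3; the old loop missed index 0 */

	printf("got buffer %d\n", freelist_get(available, 4, &last_buf));
	return 0;
}
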
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a26..65590a0f1d9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
- p->relocs[i].lobj.robj = p->relocs[i].robj;
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
INIT_LIST_HEAD(&p->relocs[i].lobj.list);
- radeon_object_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
}
}
- return radeon_object_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated, p->ib->fence);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
if (error) {
- radeon_object_list_unvalidate(&parser->validated);
+ radeon_bo_list_unvalidate(&parser->validated,
+ parser->ib->fence);
} else {
- radeon_object_list_clean(&parser->validated);
+ radeon_bo_list_unreserve(&parser->validated);
}
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76fbe73..02bcdb1240c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
if (rdev->family < CHIP_R600) {
int i;
- for (i = 0; i < 8; i++) {
- WREG32(RADEON_SURFACE0_INFO +
- i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
- 0);
+ for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+ if (rdev->surface_regs[i].bo)
+ radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+ else
+ radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+ if (radeon_card_posted(rdev))
+ return true;
+
+ if (rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ if (rdev->is_atom_bios)
+ atom_asic_init(rdev->mode_info.atom_context);
+ else
+ radeon_combios_asic_init(rdev->ddev);
+ return true;
+ } else {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return false;
+ }
+}
+
int radeon_dummy_page_init(struct radeon_device *rdev)
{
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -463,12 +482,16 @@ int radeon_atombios_init(struct radeon_device *rdev)
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
void radeon_atombios_fini(struct radeon_device *rdev)
{
- kfree(rdev->mode_info.atom_context);
+ if (rdev->mode_info.atom_context) {
+ kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context);
+ }
kfree(rdev->mode_info.atom_card_info);
}
@@ -544,16 +567,24 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ if (rdev->family >= CHIP_R600)
+ spin_lock_init(&rdev->ih.lock);
+ mutex_init(&rdev->gem.mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ /* setup workqueue */
+ rdev->wq = create_workqueue("radeon");
+ if (rdev->wq == NULL)
+ return -ENOMEM;
+
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r) {
return r;
}
- if (radeon_agpmode == -1) {
+ if ((rdev->flags & RADEON_IS_AGP) && (radeon_agpmode == -1)) {
radeon_agp_disable(rdev);
}
@@ -620,6 +651,7 @@ void radeon_device_fini(struct radeon_device *rdev)
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
radeon_fini(rdev);
+ destroy_workqueue(rdev->wq);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
@@ -633,6 +665,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *crtc;
+ int r;
if (dev == NULL || rdev == NULL) {
return -ENODEV;
@@ -643,26 +676,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
- struct radeon_object *robj;
+ struct radeon_bo *robj;
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_robj) {
- radeon_object_unpin(robj);
+ if (robj != rdev->fbdev_rbo) {
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
}
/* evict vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
radeon_fence_wait_last(rdev);
radeon_save_bios_scratch_regs(rdev);
radeon_suspend(rdev);
+ radeon_hpd_fini(rdev);
/* evict remaining vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +733,8 @@ int radeon_resume_kms(struct drm_device *dev)
fb_set_suspend(rdev->fbdev_info, 0);
release_console_sem();
+ /* reset hpd state */
+ radeon_hpd_init(rdev);
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
return 0;
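
[Editor's note: the suspend path above now brackets every unpin with radeon_bo_reserve()/radeon_bo_unreserve(). A toy model of that discipline, where "reserve" is an exclusive flag that must be held before the pin count may be touched; the names are illustrative, not the TTM API.]

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_bo {
	bool reserved;
	int pin_count;
};

static int toy_bo_reserve(struct toy_bo *bo)
{
	if (bo->reserved)
		return -16;	/* -EBUSY: someone else holds the reservation */
	bo->reserved = true;
	return 0;
}

static void toy_bo_unreserve(struct toy_bo *bo)
{
	bo->reserved = false;
}

static void toy_bo_unpin(struct toy_bo *bo)
{
	assert(bo->reserved);	/* only legal under the reservation */
	if (bo->pin_count > 0)
		bo->pin_count--;
}

int main(void)
{
	struct toy_bo fb = { .reserved = false, .pin_count = 1 };

	if (toy_bo_reserve(&fb) == 0) {	/* mirrors the suspend-path hunk */
		toy_bo_unpin(&fb);
		toy_bo_unreserve(&fb);
	}
	printf("pin_count=%d\n", fb.pin_count);
	return 0;
}
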
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c85df4afcb7..a133b833e45 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -250,6 +250,16 @@ static const char *connector_names[13] = {
"HDMI-B",
};
+static const char *hpd_names[7] = {
+ "NONE",
+ "HPD1",
+ "HPD2",
+ "HPD3",
+ "HPD4",
+ "HPD5",
+ "HPD6",
+};
+
static void radeon_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus)
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
radeon_connector->ddc_bus->rec.a_clk_reg,
radeon_connector->ddc_bus->rec.a_data_reg,
- radeon_connector->ddc_bus->rec.put_clk_reg,
- radeon_connector->ddc_bus->rec.put_data_reg,
- radeon_connector->ddc_bus->rec.get_clk_reg,
- radeon_connector->ddc_bus->rec.get_data_reg);
+ radeon_connector->ddc_bus->rec.en_clk_reg,
+ radeon_connector->ddc_bus->rec.en_data_reg,
+ radeon_connector->ddc_bus->rec.y_clk_reg,
+ radeon_connector->ddc_bus->rec.y_data_reg);
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
if (ret) {
+ radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
int ret = 0;
+ if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+ }
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (radeon_connector->edid) {
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -542,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags)
+{
+ fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+ fixed20_12 pll_out_max, pll_out_min;
+ fixed20_12 pll_in_max, pll_in_min;
+ fixed20_12 reference_freq;
+ fixed20_12 error, ffreq, a, b;
+
+ pll_out_max.full = rfixed_const(pll->pll_out_max);
+ pll_out_min.full = rfixed_const(pll->pll_out_min);
+ pll_in_max.full = rfixed_const(pll->pll_in_max);
+ pll_in_min.full = rfixed_const(pll->pll_in_min);
+ reference_freq.full = rfixed_const(pll->reference_freq);
+ do_div(freq, 10);
+ ffreq.full = rfixed_const(freq);
+ error.full = rfixed_const(100 * 100);
+
+ /* max p */
+ p.full = rfixed_div(pll_out_max, ffreq);
+ p.full = rfixed_floor(p);
+
+ /* min m */
+ m.full = rfixed_div(reference_freq, pll_in_max);
+ m.full = rfixed_ceil(m);
+
+ while (1) {
+ n.full = rfixed_div(ffreq, reference_freq);
+ n.full = rfixed_mul(n, m);
+ n.full = rfixed_mul(n, p);
+
+ f_vco.full = rfixed_div(n, m);
+ f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+ f_pclk.full = rfixed_div(f_vco, p);
+
+ if (f_pclk.full > ffreq.full)
+ error.full = f_pclk.full - ffreq.full;
+ else
+ error.full = ffreq.full - f_pclk.full;
+ error.full = rfixed_div(error, f_pclk);
+ a.full = rfixed_const(100 * 100);
+ error.full = rfixed_mul(error, a);
+
+ a.full = rfixed_mul(m, p);
+ a.full = rfixed_div(n, a);
+ best_freq.full = rfixed_mul(reference_freq, a);
+
+ if (rfixed_trunc(error) < 25)
+ break;
+
+ a.full = rfixed_const(1);
+ m.full = m.full + a.full;
+ a.full = rfixed_div(reference_freq, m);
+ if (a.full >= pll_in_min.full)
+ continue;
+
+ m.full = rfixed_div(reference_freq, pll_in_max);
+ m.full = rfixed_ceil(m);
+ a.full = rfixed_const(1);
+ p.full = p.full - a.full;
+ a.full = rfixed_mul(p, ffreq);
+ if (a.full >= pll_out_min.full)
+ continue;
+ else {
+ DRM_ERROR("Unable to find pll dividers\n");
+ break;
+ }
+ }
+
+ a.full = rfixed_const(10);
+ b.full = rfixed_mul(n, a);
+
+ frac_n.full = rfixed_floor(n);
+ frac_n.full = rfixed_mul(frac_n, a);
+ frac_n.full = b.full - frac_n.full;
+
+ *dot_clock_p = rfixed_trunc(best_freq);
+ *fb_div_p = rfixed_trunc(n);
+ *frac_fb_div_p = rfixed_trunc(frac_n);
+ *ref_div_p = rfixed_trunc(m);
+ *post_div_p = rfixed_trunc(p);
+
+ DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -642,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
return -ENOMEM;
rdev->mode_info.coherent_mode_property->values[0] = 0;
- rdev->mode_info.coherent_mode_property->values[0] = 1;
+ rdev->mode_info.coherent_mode_property->values[1] = 1;
}
if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
rdev->mode_info.load_detect_property->values[0] = 0;
- rdev->mode_info.load_detect_property->values[0] = 1;
+ rdev->mode_info.load_detect_property->values[1] = 1;
drm_mode_create_scaling_mode_property(rdev->ddev);
@@ -723,6 +833,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
if (!ret) {
return ret;
}
+ /* initialize hpd */
+ radeon_hpd_init(rdev);
drm_helper_initial_config(rdev->ddev);
return 0;
}
@@ -730,6 +842,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
+ radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
@@ -750,9 +863,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (first) {
- radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ /* set scaling */
+ if (radeon_encoder->rmx_type == RMX_OFF)
+ radeon_crtc->rmx_type = RMX_OFF;
+ else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+ mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+ radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ else
+ radeon_crtc->rmx_type = RMX_OFF;
+ /* copy native mode */
memcpy(&radeon_crtc->native_mode,
- &radeon_encoder->native_mode,
+ &radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
first = false;
} else {
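
[Editor's note: radeon_compute_pll_avivo() above searches for dividers satisfying f_pclk = f_ref * N / (M * P), with the VCO frequency f_ref * N / M kept inside the PLL's range, all in 20.12 fixed point. A plain integer brute-force sketch of the same relation; the frequency limits here are illustrative, not values a real VBIOS reports.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t f_ref = 27000;		/* kHz, reference clock */
	const uint32_t target = 148500;		/* kHz, a 1080p pixel clock */
	const uint32_t vco_min = 600000, vco_max = 1200000;
	uint32_t best_m = 1, best_n = 1, best_p = 1, best_err = ~0u;

	for (uint32_t p = 1; p <= 16; p++)
		for (uint32_t m = 1; m <= 16; m++)
			for (uint32_t n = 1; n <= 255; n++) {
				uint64_t vco = (uint64_t)f_ref * n / m;
				uint32_t f, err;

				if (vco < vco_min || vco > vco_max)
					continue;
				f = (uint32_t)(vco / p);
				err = f > target ? f - target : target - f;
				if (err < best_err) {
					best_err = err;
					best_m = m;
					best_n = n;
					best_p = p;
				}
			}
	/* finds N=44 M=1 P=8: 27000 * 44 / 8 = 148500 kHz exactly */
	printf("N=%u M=%u P=%u -> %u kHz (err %u kHz)\n",
	       best_n, best_m, best_p,
	       (uint32_t)((uint64_t)f_ref * best_n / best_m / best_p), best_err);
	return 0;
}
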
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb864af..c5c45e626d7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
+int radeon_new_pll = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e0f34..e13785282a8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
# define R600_IT_WAIT_REG_MEM 0x00003C00
# define R600_IT_MEM_WRITE 0x00003D00
# define R600_IT_INDIRECT_BUFFER 0x00003200
-# define R600_IT_CP_INTERRUPT 0x00004000
# define R600_IT_SURFACE_SYNC 0x00004300
# define R600_CB0_DEST_BASE_ENA (1 << 6)
# define R600_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d42bc512d75..b4f23ec9320 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -35,6 +35,51 @@ extern int atom_debug;
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *clone_encoder;
+ uint32_t index_mask = 0;
+ int count;
+
+ /* DIG routing gets problematic */
+ if (rdev->family >= CHIP_R600)
+ return index_mask;
+ /* LVDS/TV are too wacky */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return index_mask;
+ /* DVO requires 2x ppll clocks depending on tmds chip */
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ return index_mask;
+
+ count = -1;
+ list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+ count++;
+
+ if (clone_encoder == encoder)
+ continue;
+ if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ continue;
+ if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ continue;
+ else
+ index_mask |= (1 << count);
+ }
+ return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->possible_clones = radeon_encoder_clones(encoder);
+ }
+}
+
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-
- if (mode->hdisplay < native_mode->hdisplay ||
- mode->vdisplay < native_mode->vdisplay) {
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- if (!ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- }
- adjusted_mode->base.id = mode_id;
- }
-}
-
-
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ if (!ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ }
+ adjusted_mode->base.id = mode_id;
+ }
+
+ /* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
@@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
}
+ if (ASIC_IS_DCE3(rdev) &&
+ (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ radeon_dp_set_link_config(connector, mode);
+ }
+
return true;
}
@@ -392,7 +430,7 @@ union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
-static void
+void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
@@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *radeon_dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- /*if (radeon_output->MonType == MT_DP)
- return ATOM_ENCODER_MODE_DP;
- else*/
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ radeon_dig_connector = radeon_connector->con_priv;
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return ATOM_ENCODER_MODE_DP;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
}
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
+ */
static void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
@@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
num = 2;
break;
@@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
}
}
- if (radeon_encoder->pixel_clock > 165000) {
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
+ args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dig_connector->dp_clock == 270000)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.ucLaneNum = dig_connector->dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
args.ucLaneNum = 8;
- } else {
- if (dig_connector->linkb)
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ else
args.ucLaneNum = 4;
- }
- args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+ if (dig_connector->linkb)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
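
[Editor's note: a distilled sketch of the DIG encoder selection the hunks above apply in several places: LVTMA is hardwired to DIG2, and for UNIPHY the driver keys off the link (B -> DIG2, A -> DIG1) as a cheap way to keep two active outputs from claiming the same encoder, per the XXX comments.]

#include <stdbool.h>
#include <stdio.h>

enum xmit { UNIPHY, LVTMA };

static int pick_dig_encoder(enum xmit t, bool linkb)
{
	if (t == LVTMA)
		return 2;		/* only the DIG2 encoder can drive LVTMA */
	return linkb ? 2 : 1;		/* any unused encoder would do; the link picks one */
}

int main(void)
{
	printf("UNIPHY link A -> DIG%d\n", pick_dig_encoder(UNIPHY, false));
	printf("UNIPHY link B -> DIG%d\n", pick_dig_encoder(UNIPHY, true));
	printf("LVTMA         -> DIG%d\n", pick_dig_encoder(LVTMA, false));
	return 0;
}
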
@@ -674,8 +747,8 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
};
-static void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
+ bool is_dp = false;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
dig_connector = radeon_connector->con_priv;
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
+ is_dp = true;
+
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE32(rdev))
@@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = radeon_connector->connector_object_id;
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v1.asMode.ucLaneSel = lane_num;
+ args.v1.asMode.ucLaneSet = lane_set;
} else {
- if (radeon_encoder->pixel_clock > 165000)
+ if (is_dp)
+ args.v1.usPixelClock =
+ cpu_to_le16(dig_connector->dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
- if (radeon_encoder->pixel_clock > 165000)
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
if (dig->dig_block)
args.v2.acConfig.ucEncoderSel = 1;
+ if (dig_connector->linkb)
+ args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
break;
}
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (is_dp)
+ args.v2.acConfig.fCoherentMode = 1;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
}
@@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B);
if (dig_connector->igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (dig_connector->igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
} else {
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
if (dig_connector->igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (dig_connector->igp_lane_info & 0x2)
@@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
else if (dig_connector->igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
}
- } else {
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B |
- ATOM_TRANSMITTER_CONFIG_LANE_0_7);
- else {
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- }
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B |
- ATOM_TRANSMITTER_CONFIG_LANE_0_7);
- else {
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- }
break;
}
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+ if (is_dp)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
}
@@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ dp_link_train(encoder, connector);
+ }
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
break;
}
} else {
@@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ } else {
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return;
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1104,11 +1206,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- if (radeon_encoder->enc_priv) {
- struct radeon_encoder_atom_dig *dig;
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_atom_dig *dig;
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
+ dig = radeon_encoder->enc_priv;
+ dig->dig_block = radeon_crtc->crtc_id;
+ }
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
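
[Editor's note: radeon_setup_encoder_clones() above replaces the blanket `possible_clones = 0` with a computed bitmask of which other encoders may share a scanout. A compact sketch over a plain array; the cloneable flag stands in for the LVDS/TV/DVO exclusions in radeon_encoder_clones().]

#include <stdbool.h>
#include <stdio.h>

struct enc {
	const char *name;
	bool cloneable;
};

static unsigned int clone_mask(const struct enc *encs, int n, int self)
{
	unsigned int mask = 0;

	if (!encs[self].cloneable)
		return 0;		/* LVDS/TV-style outputs clone nothing */
	for (int i = 0; i < n; i++) {
		if (i == self || !encs[i].cloneable)
			continue;
		mask |= 1u << i;
	}
	return mask;
}

int main(void)
{
	struct enc encs[] = {
		{ "DAC1", true }, { "TMDS", true }, { "LVDS", false },
	};

	for (int i = 0; i < 3; i++)
		printf("%s possible_clones=0x%x\n",
		       encs[i].name, clone_mask(encs, 3, i));
	return 0;
}
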
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index b38c4c8e2c6..3ba213d1b06 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -59,7 +59,7 @@ static struct fb_ops radeonfb_ops = {
};
/**
- * Curretly it is assumed that the old framebuffer is reused.
+ * Currently it is assumed that the old framebuffer is reused.
*
* LOCKING
* caller should hold the mode config lock.
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
- struct radeon_object *robj = NULL;
+ struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, ttm_bo_type_kernel,
- false, &gobj);
+ &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
- robj = gobj->driver_private;
+ rbo = gobj->driver_private;
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
}
#endif
- if (tiling_flags)
- radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+ if (tiling_flags) {
+ ret = radeon_bo_set_tiling_flags(rbo,
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd.pitch);
+ if (ret)
+ dev_err(rdev->dev, "FB failed to set tiling flags\n");
+ }
mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
ret = -ENOMEM;
goto out_unref;
}
- ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_reserve(rbo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ radeon_bo_unreserve(rbo);
+ goto out_unref;
+ }
+ if (fb_tiled)
+ radeon_bo_check_tiling(rbo, 0, 0);
+ ret = radeon_bo_kmap(rbo, &fbptr);
+ radeon_bo_unreserve(rbo);
if (ret) {
- printk(KERN_ERR "failed to pin framebuffer\n");
- ret = -ENOMEM;
goto out_unref;
}
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
*fb_p = fb;
rfb = to_radeon_framebuffer(fb);
rdev->fbdev_rfb = rfb;
- rdev->fbdev_robj = robj;
+ rdev->fbdev_rbo = rbo;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- if (fb_tiled)
- radeon_object_check_tiling(robj, 0, 0);
-
- ret = radeon_object_kmap(robj, &fbptr);
- if (ret) {
- goto out_unref;
- }
-
- memset_io(fbptr, 0, aligned_size);
+ memset_io(fbptr, 0xff, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
return 0;
out_unref:
- if (robj) {
- radeon_object_kunmap(robj);
+ if (rbo) {
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
}
if (fb && ret) {
list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
int radeonfb_probe(struct drm_device *dev)
{
- return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+ struct radeon_device *rdev = dev->dev_private;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or cards with 32MB or less of VRAM */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
}
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
+ int r;
if (!fb) {
return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
info = fb->fbdev;
if (info) {
struct radeon_fb_device *rfbdev = info->par;
- robj = rfb->obj->driver_private;
+ rbo = rfb->obj->driver_private;
unregister_framebuffer(info);
- radeon_object_kunmap(robj);
- radeon_object_unpin(robj);
+ r = radeon_bo_reserve(rbo, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
drm_fb_helper_free(&rfbdev->helper);
framebuffer_release(info);
}
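
[Editor's note: radeonfb_probe() above stops hardcoding a 32 bpp console and drops to 8 bpp on RN50 and on small-VRAM cards. A sketch of the selection rule; the threshold mirrors the hunk and is_rn50 stands in for ASIC_IS_RN50().]

#include <stdbool.h>
#include <stdio.h>

static int pick_console_bpp(bool is_rn50, unsigned long vram_bytes)
{
	if (is_rn50 || vram_bytes <= 32ul * 1024 * 1024)
		return 8;	/* keep the console framebuffer small */
	return 32;
}

int main(void)
{
	printf("RN50:  %d bpp\n", pick_console_bpp(true, 256ul << 20));
	printf("16MB:  %d bpp\n", pick_console_bpp(false, 16ul << 20));
	printf("256MB: %d bpp\n", pick_console_bpp(false, 256ul << 20));
	return 0;
}
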
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d7471..cb4cd97ae39 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return signaled;
}
-int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
-{
- struct radeon_device *rdev;
- int ret = 0;
-
- rdev = fence->rdev;
-
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
- while (1) {
- if (radeon_fence_signaled(fence))
- break;
-
- if (time_after_eq(jiffies, fence->timeout)) {
- ret = -EBUSY;
- break;
- }
-
- if (lazy)
- schedule_timeout(1);
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- return ret;
-}
-
-
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return 0;
}
- if (rdev->family >= CHIP_R600) {
- r = r600_fence_wait(fence, intr, 0);
- if (r == -ERESTARTSYS)
- return -EBUSY;
- return r;
- }
-
retry:
cur_jiffies = jiffies;
timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
}
if (intr) {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
- if (unlikely(r == -ERESTARTSYS)) {
- return -EBUSY;
- }
+ radeon_irq_kms_sw_irq_put(rdev);
+ if (unlikely(r < 0))
+ return r;
} else {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
+ radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) {
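
[Editor's note: the fence-wait hunk above brackets each sleep with radeon_irq_kms_sw_irq_get()/put(), a reference count that keeps the software interrupt enabled only while at least one waiter needs it. A toy model of that counting; the real code uses atomics and locking, here a plain int and printf stand in.]

#include <stdio.h>

static int sw_irq_refcount;

static void sw_irq_get(void)
{
	if (sw_irq_refcount++ == 0)
		printf("sw irq: enable\n");	/* first waiter switches it on */
}

static void sw_irq_put(void)
{
	if (--sw_irq_refcount == 0)
		printf("sw irq: disable\n");	/* last waiter switches it off */
}

int main(void)
{
	sw_irq_get();	/* waiter A */
	sw_irq_get();	/* waiter B: already enabled, only the count moves */
	sw_irq_put();
	sw_irq_put();	/* count hits zero: disable */
	return 0;
}
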
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d17384..3d4d84e078a 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@ typedef union rfixed {
#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12)
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+ u32 non_frac = rfixed_trunc(A);
+
+ return rfixed_const(non_frac);
+}
+
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+ u32 non_frac = rfixed_trunc(A);
+
+ if (A.full > rfixed_const(non_frac))
+ return rfixed_const(non_frac + 1);
+ else
+ return rfixed_const(non_frac);
+}
+
static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{
u64 tmp = ((u64)A.full << 13);
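
[Editor's note: a standalone check of the 20.12 fixed-point floor/ceil helpers added above. The value carries 12 fractional bits, so const/trunc/floor/ceil reduce to shifts and compares; this mirrors the header's definitions in plain C.]

#include <stdint.h>
#include <stdio.h>

#define FCONST(x)	((uint32_t)(x) << 12)	/* rfixed_const */
#define FTRUNC(v)	((v) >> 12)		/* rfixed_trunc */

static uint32_t ffloor(uint32_t v)
{
	return FCONST(FTRUNC(v));
}

static uint32_t fceil(uint32_t v)
{
	uint32_t i = FTRUNC(v);

	return v > FCONST(i) ? FCONST(i + 1) : FCONST(i);
}

int main(void)
{
	uint32_t v = FCONST(3) + 2048;	/* 3.5 in 20.12 */

	/* prints floor(3.5)=3 ceil(3.5)=4 ceil(3.0)=3 */
	printf("floor(3.5)=%u ceil(3.5)=%u ceil(3.0)=%u\n",
	       FTRUNC(ffloor(v)), FTRUNC(fceil(v)), FTRUNC(fceil(FCONST(3))));
	return 0;
}
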
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178..e73d56e83fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_object_create(rdev, NULL,
- rdev->gart.table_size,
- true,
- RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->gart.table.vram.robj);
+ r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr;
int r;
- r = radeon_object_pin(rdev->gart.table.vram.robj,
- RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (unlikely(r != 0))
return r;
- }
- r = radeon_object_kmap(rdev->gart.table.vram.robj,
- (void **)&rdev->gart.table.vram.ptr);
+ r = radeon_bo_pin(rdev->gart.table.vram.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
- DRM_ERROR("radeon: failed to map gart vram table.\n");
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
+ r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+ (void **)&rdev->gart.table.vram.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
- return 0;
+ return r;
}
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->gart.table.vram.robj == NULL) {
return;
}
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ radeon_bo_unref(&rdev->gart.table.vram.robj);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf254d..2944486871b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_object *robj = gobj->driver_private;
+ struct radeon_bo *robj = gobj->driver_private;
gobj->driver_private = NULL;
if (robj) {
- radeon_object_unref(&robj);
+ radeon_bo_unref(&robj);
}
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj)
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
*obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
- interruptible, &robj);
+ r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_object *robj = obj->driver_private;
- uint32_t flags;
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
- switch (pin_domain) {
- case RADEON_GEM_DOMAIN_VRAM:
- flags = TTM_PL_FLAG_VRAM;
- break;
- case RADEON_GEM_DOMAIN_GTT:
- flags = TTM_PL_FLAG_TT;
- break;
- default:
- flags = TTM_PL_FLAG_SYSTEM;
- break;
- }
- return radeon_object_pin(robj, flags, gpu_addr);
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+ radeon_bo_unreserve(robj);
+ return r;
}
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_object *robj = obj->driver_private;
- radeon_object_unpin(robj);
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
+
+ r = radeon_bo_reserve(robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
uint32_t domain;
int r;
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
if (r) {
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
+ radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
void radeon_gem_fini(struct radeon_device *rdev)
{
- radeon_object_force_delete(rdev);
+ radeon_bo_force_delete(rdev);
}
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_radeon_gem_info *args = data;
args->vram_size = rdev->mc.real_vram_size;
- /* FIXME: report somethings that makes sense */
- args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
- args->gart_size = rdev->mc.gtt_size;
+ args->vram_visible = rdev->mc.real_vram_size;
+ if (rdev->stollen_vga_memory)
+ args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ if (rdev->fbdev_rbo)
+ args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+ RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
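
For scale: vram_visible now subtracts the actual stolen VGA and fbdev buffers rather than a hard-coded 4 MiB, and gart_size subtracts the CP ring, one 4096-byte page and the IB pool. Assuming RADEON_IB_POOL_SIZE is 16 (its usual value; 16 slots of 64 KiB = 1 MiB) and a 1 MiB ring, a 512 MiB GTT would be reported as 512 MiB - 1 MiB - 4 KiB - 1 MiB, i.e. just under 510 MiB.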
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, true, &gobj);
+ args->initial_domain, false,
+ false, &gobj);
if (r) {
return r;
}
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* just validate the BO into a certain domain */
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- int r;
+ struct radeon_bo *robj;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_mmap(robj, &args->addr_ptr);
+ args->addr_ptr = radeon_bo_mmap_offset(robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- return r;
+ return 0;
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
- uint32_t cur_placement;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_busy_domain(robj, &cur_placement);
+ r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
+ radeon_hdp_flush(robj->rdev);
return r;
}
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_set_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r = 0;
DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL)
return -EINVAL;
robj = gobj->driver_private;
- radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+ r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_get_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
int r = 0;
DRM_DEBUG("\n");
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -EINVAL;
- robj = gobj->driver_private;
- radeon_object_get_tiling_flags(robj, &args->tiling_flags,
- &args->pitch);
+ rbo = gobj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+ radeon_bo_unreserve(rbo);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
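
Every ioctl in this file now follows the same skeleton: look up the GEM object by handle, fetch the radeon_bo from driver_private, reserve it around any state access, then drop the reference under struct_mutex. A hedged sketch of that skeleton (the function name and the empty reserved section are illustrative; the lookup and unreference calls match the code above):

    static int example_bo_ioctl(struct drm_device *dev, struct drm_file *filp,
                                u32 handle)
    {
            struct drm_gem_object *gobj;
            struct radeon_bo *robj;
            int r;

            gobj = drm_gem_object_lookup(dev, filp, handle);
            if (gobj == NULL)
                    return -EINVAL;
            robj = gobj->driver_private;

            r = radeon_bo_reserve(robj, false);
            if (likely(r == 0)) {
                    /* reserved: safe to touch placement/tiling state here */
                    radeon_bo_unreserve(robj);
            }

            mutex_lock(&dev->struct_mutex);
            drm_gem_object_unreference(gobj);
            mutex_unlock(&dev->struct_mutex);
            return r;
    }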
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d32e5c..da3da1e89d0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
- struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
- struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety sake
*/
- if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
- if (rec->a_clk_reg == RADEON_GPIO_MONID) {
- WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
- R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
- } else {
- WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
- R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ if (rec->hw_capable) {
+ if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+ if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+ } else {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ }
}
}
- if (lock_state) {
- temp = RREG32(rec->a_clk_reg);
- temp &= ~(rec->a_clk_mask);
- WREG32(rec->a_clk_reg, temp);
-
- temp = RREG32(rec->a_data_reg);
- temp &= ~(rec->a_data_mask);
- WREG32(rec->a_data_reg, temp);
- }
+ /* clear the output pin values */
+ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, temp);
+
+ temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, temp);
+
+ /* set the pins to input */
+ temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, temp);
+
+ temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, temp);
+
+ /* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_clk_reg);
- val &= rec->get_clk_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_clk_reg);
+ val &= rec->y_clk_mask;
return (val != 0);
}
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_data_reg);
- val &= rec->get_data_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_data_reg);
+ val &= rec->y_data_mask;
+
return (val != 0);
}
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
- val |= clock ? 0 : rec->put_clk_mask;
- WREG32(rec->put_clk_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ val |= clock ? 0 : rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, val);
}
static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
- val |= data ? 0 : rec->put_data_mask;
- WREG32(rec->put_data_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ val |= data ? 0 : rec->en_data_mask;
+ WREG32(rec->en_data_reg, val);
}
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
- struct radeon_i2c_bus_rec *rec,
- const char *name)
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
return NULL;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.algo_data = &i2c->algo;
i2c->dev = dev;
- i2c->algo.setsda = set_data;
- i2c->algo.setscl = set_clock;
- i2c->algo.getsda = get_data;
- i2c->algo.getscl = get_clock;
- i2c->algo.udelay = 20;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 20;
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
* make this, 2 jiffies is a lot more reliable */
- i2c->algo.timeout = 2;
- i2c->algo.data = i2c;
+ i2c->algo.bit.timeout = 2;
+ i2c->algo.bit.data = i2c;
i2c->rec = *rec;
- i2c_set_adapdata(&i2c->adapter, i2c);
-
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@ out_free:
}
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+ if (i2c == NULL)
+ return NULL;
+
+ i2c->rec = *rec;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.dp;
+ i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+ i2c->algo.dp.address = 0;
+ ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_INFO("Failed to register i2c %s\n", name);
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
}
+
+void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
+{
+ u8 out_buf[2];
+ u8 in_buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+ } else {
+ DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ addr, *val);
+ }
+}
+
+void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
+{
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = val;
+
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+ DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ addr, val);
+}
+
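
radeon_i2c_sw_get_byte()/radeon_i2c_sw_put_byte() give callers a register-style accessor on top of any bit-banged bus. A sketch of a read-modify-write with them; the 0x38 slave address and register 0x04 are made-up values for illustration:

    /* hypothetical: set bit 0 of register 0x04 on a chip at slave address 0x38 */
    static void example_rmw(struct radeon_i2c_chan *bus)
    {
            u8 val = 0;

            radeon_i2c_sw_get_byte(bus, 0x38, 0x04, &val);
            radeon_i2c_sw_put_byte(bus, 0x38, 0x04, val | 0x01);
    }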
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a0fe6232dcb..9223296fe37 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
return radeon_irq_process(rdev);
}
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ hotplug_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_sysfs_hotplug_event(dev);
+}
+
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned i;
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
num_crtc = 1;
-
+ spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
- if (rdev->family >= CHIP_RV380) {
+ /* MSIs don't seem to work on my rs780;
+ * not sure about rs880 or other rs780s.
+ * Needs more investigation.
+ */
+ if ((rdev->family >= CHIP_RV380) &&
+ (rdev->family != CHIP_RS780) &&
+ (rdev->family != CHIP_RS880)) {
int ret = pci_enable_msi(rdev->pdev);
- if (!ret)
+ if (!ret) {
rdev->msi_enabled = 1;
+ DRM_INFO("radeon: using MSI.\n");
+ }
}
drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
pci_disable_msi(rdev->pdev);
}
}
+
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+ rdev->irq.sw_int = true;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+ rdev->irq.sw_int = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
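
The new get/put pair refcounts users of the software interrupt under irq.sw_lock, so the IRQ is enabled only while at least one caller needs it. Calls must be strictly paired; the fence wait in the middle is illustrative:

    radeon_irq_kms_sw_irq_get(rdev);        /* first user enables sw_int */
    /* ... sleep until a fence is signalled from the IRQ handler ... */
    radeon_irq_kms_sw_irq_put(rdev);        /* last user disables it again */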
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba128621057..f23b05606eb 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
#include "radeon.h"
#include "radeon_drm.h"
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev == NULL)
+ return 0;
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+}
-/*
- * Driver load/unload
- */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
*/
r = radeon_device_init(rdev, dev, dev->pdev, flags);
if (r) {
- DRM_ERROR("Fatal error while trying to initialize radeon.\n");
- return r;
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ goto out;
}
/* Again modeset_init should fail only on fatal error
* otherwise it should provide enough functionalities
* for shadowfb to run
*/
r = radeon_modeset_init(rdev);
- if (r) {
- return r;
- }
- return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev == NULL)
- return 0;
- radeon_modeset_fini(rdev);
- radeon_device_fini(rdev);
- kfree(rdev);
- dev->dev_private = NULL;
- return 0;
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+ if (r)
+ radeon_driver_unload_kms(dev);
+ return r;
}
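
Moving unload above load lets every load failure funnel through one tail that reuses the driver's own teardown; the rdev == NULL check is what makes that safe on a partially initialised device. The pattern, reduced to a sketch with hypothetical step names:

    int example_load(struct drm_device *dev, unsigned long flags)
    {
            int r;

            r = example_hw_init(dev);               /* hypothetical step 1 */
            if (r)
                    goto out;
            r = example_modeset_init(dev);          /* hypothetical step 2 */
    out:
            if (r)
                    example_unload(dev);            /* must tolerate partial init */
            return r;
    }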
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8d0b7aa87fa..b82ede98e15 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,6 +30,18 @@
#include "radeon.h"
#include "atom.h"
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
uint32_t mask;
if (radeon_crtc->crtc_id)
- mask = (RADEON_CRTC2_EN |
- RADEON_CRTC2_DISP_DIS |
+ mask = (RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_VSYNC_DIS |
RADEON_CRTC2_HSYNC_DIS |
RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
uint64_t base;
uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
uint32_t crtc_pitch, pitch_pixels;
uint32_t tiling_flags;
int format;
uint32_t gen_cntl_reg, gen_cntl_val;
+ int r;
DRM_DEBUG("\n");
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return false;
}
+ /* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+ if (tiling_flags & RADEON_TILING_MICRO)
+ DRM_ERROR("trying to scanout microtiled buffer\n");
+
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
(crtc->fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
- if (tiling_flags & RADEON_TILING_MICRO)
- DRM_ERROR("trying to scanout microtiled buffer\n");
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
@@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */
@@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
- /* check to see if TV DAC is enabled for another crtc and keep it enabled */
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
- crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
- else
- crtc2_gen_cntl = 0;
-
+ /* keep the TV DAC enabled if it is in use by another crtc */
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
crtc2_gen_cntl |= ((format << 8)
| RADEON_CRTC2_VSYNC_DIS
| RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc_ext_cntl;
uint32_t disp_merge_cntl;
- crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+ crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
| (format << 8)
| RADEON_CRTC_DISP_REQ_EN_B
| ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
- if (lvds) {
- if (lvds->use_bios_dividers) {
- pll_ref_div = lvds->panel_ref_divider;
- pll_fb_post_div = (lvds->panel_fb_divider |
- (lvds->panel_post_divider << 16));
- htotal_cntl = 0;
- use_bios_divs = true;
+ if (!rdev->is_atom_bios) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ if (lvds->use_bios_dividers) {
+ pll_ref_div = lvds->panel_ref_divider;
+ pll_fb_post_div = (lvds->panel_fb_divider |
+ (lvds->panel_post_divider << 16));
+ htotal_cntl = 0;
+ use_bios_divs = true;
+ }
}
}
pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode);
+ radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
} else {
@@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * The hardware wedges sometimes if you reconfigure one CRTC
+ * whilst another is running (see fdo bug #24611).
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
}
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * Reenable the CRTCs that should be running.
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+ if (crtci->enabled)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+ }
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
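
The DRM CRTC helper invokes prepare, then mode_set, then commit, so with the change above a single mode set now runs as (sketch of the helper's call order, not new code):

    radeon_crtc_prepare(crtc);      /* DPMS off on *all* CRTCs (fdo bug #24611) */
    radeon_crtc_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
    radeon_crtc_commit(crtc);       /* DPMS on again for every crtci->enabled */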
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 00382122869..df00515e81f 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
- if ((!rdev->is_atom_bios)) {
+ if (rdev->is_atom_bios) {
+ /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl,
+ * so we need to call that on resume to set up the reg properly.
+ */
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ } else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
(lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
} else
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
- } else
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ }
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
lvds_gen_cntl &= ~(RADEON_LVDS_ON |
RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->base.id = mode_id;
+ }
return true;
}
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.dpms = radeon_legacy_lvds_dpms,
- .mode_fixup = radeon_legacy_lvds_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_lvds_prepare,
.mode_set = radeon_legacy_lvds_mode_set,
.commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
.dpms = radeon_legacy_primary_dac_dpms,
- .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_primary_dac_prepare,
.mode_set = radeon_legacy_primary_dac_mode_set,
.commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
.dpms = radeon_legacy_tmds_int_dpms,
- .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_int_prepare,
.mode_set = radeon_legacy_tmds_int_mode_set,
.commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/*if (mode->clock > 165000)
fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
}
+ if (!radeon_combios_external_tmds_setup(encoder))
+ radeon_external_tmds_setup(encoder);
}
if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+ if (tmds) {
+ if (tmds->i2c_bus)
+ radeon_i2c_destroy(tmds->i2c_bus);
+ }
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
.dpms = radeon_legacy_tmds_ext_dpms,
- .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_ext_prepare,
.mode_set = radeon_legacy_tmds_ext_mode_set,
.commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
- .destroy = radeon_enc_destroy,
+ .destroy = radeon_ext_tmds_enc_destroy,
};
-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
.dpms = radeon_legacy_tv_dac_dpms,
- .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tv_dac_prepare,
.mode_set = radeon_legacy_tv_dac_mode_set,
.commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
return tmds;
}
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_ext_tmds *tmds = NULL;
+ bool ret;
+
+ if (rdev->is_atom_bios)
+ return NULL;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+ if (ret == false)
+ radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+ return tmds;
+}
+
void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
if (!rdev->is_atom_bios)
- radeon_combios_get_ext_tmds_info(radeon_encoder);
+ radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ace726aa0d7..44d4b652ea1 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
#include <drm_crtc.h>
#include <drm_mode.h>
#include <drm_edid.h>
+#include <drm_dp_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@ enum radeon_tv_std {
TV_STD_PAL_CN,
};
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ * grabs the gpio pins for software use
+ * 0=not held 1=held
+ * 2. "a" reg and bits
+ * output pin value
+ * 0=low 1=high
+ * 3. "en" reg and bits
+ * sets the pin direction
+ * 0=input 1=output
+ * 4. "y" reg and bits
+ * input pin value
+ * 0=low 1=high
+ */
struct radeon_i2c_bus_rec {
bool valid;
+ /* id used by atom */
+ uint8_t i2c_id;
+ /* can be used with hw i2c engine */
+ bool hw_capable;
+ /* uses multi-media i2c engine */
+ bool mm_i2c;
+ /* regs and bits */
uint32_t mask_clk_reg;
uint32_t mask_data_reg;
uint32_t a_clk_reg;
uint32_t a_data_reg;
- uint32_t put_clk_reg;
- uint32_t put_data_reg;
- uint32_t get_clk_reg;
- uint32_t get_data_reg;
+ uint32_t en_clk_reg;
+ uint32_t en_data_reg;
+ uint32_t y_clk_reg;
+ uint32_t y_data_reg;
uint32_t mask_clk_mask;
uint32_t mask_data_mask;
- uint32_t put_clk_mask;
- uint32_t put_data_mask;
- uint32_t get_clk_mask;
- uint32_t get_data_mask;
uint32_t a_clk_mask;
uint32_t a_data_mask;
+ uint32_t en_clk_mask;
+ uint32_t en_data_mask;
+ uint32_t y_clk_mask;
+ uint32_t y_data_mask;
};
struct radeon_tmds_pll {
@@ -150,9 +172,12 @@ struct radeon_pll {
};
struct radeon_i2c_chan {
- struct drm_device *dev;
struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
+ struct drm_device *dev;
+ union {
+ struct i2c_algo_dp_aux_data dp;
+ struct i2c_algo_bit_data bit;
+ } algo;
struct radeon_i2c_bus_rec rec;
};
@@ -170,6 +195,11 @@ enum radeon_connector_table {
CT_EMAC,
};
+enum radeon_dvo_chip {
+ DVO_SIL164,
+ DVO_SIL1178,
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4];
};
+struct radeon_encoder_ext_tmds {
+ /* tmds over dvo */
+ struct radeon_i2c_chan *i2c_bus;
+ uint8_t slave_addr;
+ enum radeon_dvo_chip dvo_chip;
+};
+
/* spread spectrum */
struct radeon_atom_ss {
uint16_t percentage;
@@ -302,6 +339,35 @@ struct radeon_encoder {
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
bool linkb;
+ /* displayport */
+ struct radeon_i2c_chan *dp_i2c_bus;
+ u8 dpcd[8];
+ u8 dp_sink_type;
+ int dp_clock;
+ int dp_lane_count;
+};
+
+struct radeon_gpio_rec {
+ bool valid;
+ u8 id;
+ u32 reg;
+ u32 mask;
+};
+
+enum radeon_hpd_id {
+ RADEON_HPD_NONE = 0,
+ RADEON_HPD_1,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+};
+
+struct radeon_hpd {
+ enum radeon_hpd_id hpd;
+ u8 plugged_state;
+ struct radeon_gpio_rec gpio;
};
struct radeon_connector {
@@ -318,6 +384,7 @@ struct radeon_connector {
void *con_priv;
bool dac_load_detect;
uint16_t connector_object_id;
+ struct radeon_hpd hpd;
};
struct radeon_framebuffer {
@@ -325,10 +392,37 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+ struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern void dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+ int action, uint8_t lane_num,
+ uint8_t lane_set);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte);
+
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -343,12 +437,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *post_div_p,
int flags);
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
@@ -378,12 +484,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
extern struct radeon_encoder_primary_dac *
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@ extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
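
The mask/a/en/y comment above encodes standard open-drain bit-banging: a line is pulled low by turning the pin into an output (en bit set, a bit pre-cleared), and released high by turning it back into an input so the external pull-up wins. A sketch of how a data-line write follows from those semantics (WREG32/RREG32 are the driver's register accessors and need an rdev in scope; the helper name is illustrative):

    /* open-drain write: drive low via the direction (en) register,
     * release the pin to let the pull-up take the line high */
    static void example_set_line(struct radeon_device *rdev,
                                 struct radeon_i2c_bus_rec *rec, int high)
    {
            u32 val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;

            if (!high)
                    val |= rec->en_data_mask;       /* output + a-bit 0 => line low */
            WREG32(rec->en_data_reg, val);
    }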
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c..544e18ffaf2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,100 +34,53 @@
#include "radeon_drm.h"
#include "radeon.h"
-struct radeon_object {
- struct ttm_buffer_object tobj;
- struct list_head list;
- struct radeon_device *rdev;
- struct drm_gem_object *gobj;
- struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
- uint64_t gpu_addr;
- void *kptr;
- bool is_iomem;
- uint32_t tiling_flags;
- uint32_t pitch;
- int surface_reg;
-};
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
/*
* To exclude mutual BO access we rely on bo_reserve exclusion, as all
* function are calling it.
*/
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
- return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
+ struct radeon_bo *bo;
-static void radeon_object_unreserve(struct radeon_object *robj)
-{
- ttm_bo_unreserve(&robj->tobj);
+ bo = container_of(tbo, struct radeon_bo, tbo);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_clear_surface_reg(bo);
+ kfree(bo);
}
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
- struct radeon_object *robj;
+ u32 c = 0;
- robj = container_of(tobj, struct radeon_object, tobj);
- list_del_init(&robj->list);
- radeon_object_clear_surface_reg(robj);
- kfree(robj);
+ rbo->placement.fpfn = 0;
+ rbo->placement.lpfn = 0;
+ rbo->placement.placement = rbo->placements;
+ rbo->placement.busy_placement = rbo->placements;
+ if (domain & RADEON_GEM_DOMAIN_VRAM)
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ if (domain & RADEON_GEM_DOMAIN_GTT)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ if (domain & RADEON_GEM_DOMAIN_CPU)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ rbo->placement.num_placement = c;
+ rbo->placement.num_busy_placement = c;
}
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+ unsigned long size, bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr)
{
- /* Default gpu address */
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- if (robj->tobj.mem.mm_node == NULL) {
- return;
- }
- robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
- switch (robj->tobj.mem.mem_type) {
- case TTM_PL_VRAM:
- robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
- break;
- case TTM_PL_TT:
- robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- return;
- }
-}
-
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
- uint32_t flags = 0;
- if (domain & RADEON_GEM_DOMAIN_VRAM) {
- flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
- }
- if (domain & RADEON_GEM_DOMAIN_GTT) {
- flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- }
- if (domain & RADEON_GEM_DOMAIN_CPU) {
- flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
- }
- if (!flags) {
- flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
- }
- return flags;
-}
-
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr)
-{
- struct radeon_object *robj;
+ struct radeon_bo *bo;
enum ttm_bo_type type;
- uint32_t flags;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +91,125 @@ int radeon_object_create(struct radeon_device *rdev,
} else {
type = ttm_bo_type_device;
}
- *robj_ptr = NULL;
- robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
- if (robj == NULL) {
+ *bo_ptr = NULL;
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
return -ENOMEM;
- }
- robj->rdev = rdev;
- robj->gobj = gobj;
- robj->surface_reg = -1;
- INIT_LIST_HEAD(&robj->list);
-
- flags = radeon_object_flags_from_domain(domain);
- r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
- 0, 0, false, NULL, size,
- &radeon_ttm_object_object_destroy);
+ bo->rdev = rdev;
+ bo->gobj = gobj;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocations are uninterruptible */
+ r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, 0, 0, !kernel, NULL, size,
+ &radeon_ttm_bo_destroy);
if (unlikely(r != 0)) {
- /* ttm call radeon_ttm_object_object_destroy if error happen */
- DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
- size, flags, 0);
+ if (r != -ERESTARTSYS)
+ dev_err(rdev->dev,
+ "object_init failed for (%lu, 0x%08X)\n",
+ size, domain);
return r;
}
- *robj_ptr = robj;
+ *bo_ptr = bo;
if (gobj) {
- list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&bo->rdev->gem.mutex);
}
return 0;
}
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
+ bool is_iomem;
int r;
- spin_lock(&robj->tobj.lock);
- if (robj->kptr) {
+ if (bo->kptr) {
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- spin_unlock(&robj->tobj.lock);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
if (r) {
return r;
}
- spin_lock(&robj->tobj.lock);
- robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
- spin_unlock(&robj->tobj.lock);
+ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- radeon_object_check_tiling(robj, 0, 0);
+ radeon_bo_check_tiling(bo, 0, 0);
return 0;
}
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
{
- spin_lock(&robj->tobj.lock);
- if (robj->kptr == NULL) {
- spin_unlock(&robj->tobj.lock);
+ if (bo->kptr == NULL)
return;
- }
- robj->kptr = NULL;
- spin_unlock(&robj->tobj.lock);
- radeon_object_check_tiling(robj, 0, 0);
- ttm_bo_kunmap(&robj->kmap);
+ bo->kptr = NULL;
+ radeon_bo_check_tiling(bo, 0, 0);
+ ttm_bo_kunmap(&bo->kmap);
}
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
{
- struct ttm_buffer_object *tobj;
+ struct ttm_buffer_object *tbo;
- if ((*robj) == NULL) {
+ if ((*bo) == NULL)
return;
- }
- tobj = &((*robj)->tobj);
- ttm_bo_unref(&tobj);
- if (tobj == NULL) {
- *robj = NULL;
- }
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
- *offset = robj->tobj.addr_space_offset;
- return 0;
+ tbo = &((*bo)->tbo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
}
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
- uint32_t flags;
- uint32_t tmp;
- int r;
+ int r, i;
- flags = radeon_object_flags_from_domain(domain);
- spin_lock(&robj->tobj.lock);
- if (robj->pin_count) {
- robj->pin_count++;
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- spin_unlock(&robj->tobj.lock);
+ radeon_ttm_placement_from_domain(bo, domain);
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
- return r;
- }
- tmp = robj->tobj.mem.placement;
- ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
- robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- radeon_object_gpu_addr(robj);
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- robj->pin_count = 1;
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to pin object.\n");
- }
- radeon_object_unreserve(robj);
+ radeon_ttm_placement_from_domain(bo, domain);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (likely(r == 0)) {
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
+ }
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p pin failed\n", bo);
return r;
}
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
{
- uint32_t flags;
- int r;
+ int r, i;
- spin_lock(&robj->tobj.lock);
- if (!robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
- return;
- }
- robj->pin_count--;
- if (robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- return;
- }
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
- return;
- }
- flags = robj->tobj.mem.placement;
- robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to unpin buffer.\n");
- }
- radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
- int r = 0;
-
- /* FIXME: should use block reservation instead */
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, false);
- }
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
- return r;
-}
-
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
- int r = 0;
-
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- *cur_placement = robj->tobj.mem.mem_type;
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, true);
+ if (!bo->pin_count) {
+ dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+ return 0;
}
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
}
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
/* Useless to evict on IGP chips */
@@ -346,30 +218,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
{
- struct radeon_object *robj, *n;
+ struct radeon_bo *bo, *n;
struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
}
- DRM_ERROR("Userspace still has active objects !\n");
- list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+ dev_err(rdev->dev, "Userspace still has active objects!\n");
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = robj->gobj;
- DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
- gobj, robj, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
- list_del_init(&robj->list);
- radeon_object_unref(&robj);
+ gobj = bo->gobj;
+ dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+ gobj, bo, (unsigned long)gobj->size,
+ *((unsigned long *)&gobj->refcount));
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_unref(&bo);
gobj->driver_private = NULL;
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
{
/* Add an MTRR for the VRAM */
rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +256,13 @@ int radeon_object_init(struct radeon_device *rdev)
return radeon_ttm_init(rdev);
}
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
}
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head)
{
if (lobj->wdomain) {
list_add(&lobj->list, head);
@@ -397,72 +271,62 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
}
}
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
int r;
list_for_each_entry(lobj, head, list){
- if (!lobj->robj->pin_count) {
- r = radeon_object_reserve(lobj->robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object.\n");
- return r;
- }
- } else {
- }
+ r = radeon_bo_reserve(lobj->bo, false);
+ if (unlikely(r != 0))
+ return r;
}
return 0;
}
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
list_for_each_entry(lobj, head, list) {
- if (!lobj->robj->pin_count) {
- radeon_object_unreserve(lobj->robj);
- }
+ /* only unreserve objects we successfully reserved */
+ if (radeon_bo_is_reserved(lobj->bo))
+ radeon_bo_unreserve(lobj->bo);
}
}
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_object *robj;
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
struct radeon_fence *old_fence = NULL;
int r;
- r = radeon_object_list_reserve(head);
+ r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
- radeon_object_list_unreserve(head);
return r;
}
list_for_each_entry(lobj, head, list) {
- robj = lobj->robj;
- if (!robj->pin_count) {
+ bo = lobj->bo;
+ if (!bo->pin_count) {
if (lobj->wdomain) {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->wdomain);
+ radeon_ttm_placement_from_domain(bo,
+ lobj->wdomain);
} else {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->rdomain);
+ radeon_ttm_placement_from_domain(bo,
+ lobj->rdomain);
}
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- true, false);
- if (unlikely(r)) {
- DRM_ERROR("radeon: failed to validate.\n");
+ r = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
+ if (unlikely(r))
return r;
- }
- radeon_object_gpu_addr(robj);
}
- lobj->gpu_offset = robj->gpu_addr;
- lobj->tiling_flags = robj->tiling_flags;
+ lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+ lobj->tiling_flags = bo->tiling_flags;
if (fence) {
- old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
- robj->tobj.sync_obj = radeon_fence_ref(fence);
- robj->tobj.sync_obj_arg = NULL;
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
}
if (old_fence) {
radeon_fence_unref(&old_fence);
@@ -471,51 +335,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
return 0;
}
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_fence *old_fence = NULL;
+ struct radeon_bo_list *lobj;
+ struct radeon_fence *old_fence;
- list_for_each_entry(lobj, head, list) {
- old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
- lobj->robj->tobj.sync_obj = NULL;
- if (old_fence) {
- radeon_fence_unref(&old_fence);
+ if (fence)
+ list_for_each_entry(lobj, head, list) {
+ old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+ if (old_fence == fence) {
+ lobj->bo->tbo.sync_obj = NULL;
+ radeon_fence_unref(&old_fence);
+ }
}
- }
- radeon_object_list_unreserve(head);
-}
-
-void radeon_object_list_clean(struct list_head *head)
-{
- radeon_object_list_unreserve(head);
+ radeon_bo_list_unreserve(head);
}
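
Taken together, radeon_bo_list_validate()/radeon_bo_list_unvalidate() now carry the whole reserve-validate-fence cycle for command submission. A minimal sketch of a caller, assuming the list entries were already added with radeon_bo_list_add_object(); the function name is illustrative and error unwinding is abbreviated:

static int submit_list_sketch(struct list_head *head, struct radeon_fence *fence)
{
	int r;

	r = radeon_bo_list_validate(head, fence); /* reserves, places, fences */
	if (r) {
		/* drop only the fence refs we attached, then unreserve */
		radeon_bo_list_unvalidate(head, fence);
		return r;
	}
	/* ... emit commands using each lobj->gpu_offset here ... */
	radeon_bo_list_unreserve(head);
	return 0;
}
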
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &robj->tobj);
+ return ttm_fbdev_mmap(vma, &bo->tbo);
}
-unsigned long radeon_object_size(struct radeon_object *robj)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
- return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- struct radeon_object *old_object;
+ struct radeon_bo *old_object;
int steal;
int i;
- if (!robj->tiling_flags)
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!bo->tiling_flags)
return 0;
- if (robj->surface_reg >= 0) {
- reg = &rdev->surface_regs[robj->surface_reg];
- i = robj->surface_reg;
+ if (bo->surface_reg >= 0) {
+ reg = &rdev->surface_regs[bo->surface_reg];
+ i = bo->surface_reg;
goto out;
}
@@ -523,10 +380,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
reg = &rdev->surface_regs[i];
- if (!reg->robj)
+ if (!reg->bo)
break;
- old_object = reg->robj;
+ old_object = reg->bo;
if (old_object->pin_count == 0)
steal = i;
}
@@ -537,91 +394,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
return -ENOMEM;
/* find someone with a surface reg and nuke their BO */
reg = &rdev->surface_regs[steal];
- old_object = reg->robj;
+ old_object = reg->bo;
/* blow away the mapping */
DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
- ttm_bo_unmap_virtual(&old_object->tobj);
+ ttm_bo_unmap_virtual(&old_object->tbo);
old_object->surface_reg = -1;
i = steal;
}
- robj->surface_reg = i;
- reg->robj = robj;
+ bo->surface_reg = i;
+ reg->bo = bo;
out:
- radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
- robj->tobj.mem.mm_node->start << PAGE_SHIFT,
- robj->tobj.num_pages << PAGE_SHIFT);
+ radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+ bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+ bo->tbo.num_pages << PAGE_SHIFT);
return 0;
}
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- if (robj->surface_reg == -1)
+ if (bo->surface_reg == -1)
return;
- reg = &rdev->surface_regs[robj->surface_reg];
- radeon_clear_surface_reg(rdev, robj->surface_reg);
+ reg = &rdev->surface_regs[bo->surface_reg];
+ radeon_clear_surface_reg(rdev, bo->surface_reg);
- reg->robj = NULL;
- robj->surface_reg = -1;
+ reg->bo = NULL;
+ bo->surface_reg = -1;
}
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
{
- robj->tiling_flags = tiling_flags;
- robj->pitch = pitch;
+ int r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+ bo->tiling_flags = tiling_flags;
+ bo->pitch = pitch;
+ radeon_bo_unreserve(bo);
+ return 0;
}
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
- uint32_t *tiling_flags,
- uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ uint32_t *tiling_flags,
+ uint32_t *pitch)
{
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
if (tiling_flags)
- *tiling_flags = robj->tiling_flags;
+ *tiling_flags = bo->tiling_flags;
if (pitch)
- *pitch = robj->pitch;
+ *pitch = bo->pitch;
}
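
Note the asymmetry introduced here: radeon_bo_set_tiling_flags() takes the reservation itself, while radeon_bo_get_tiling_flags() asserts that the caller already holds it. A hedged sketch of a caller respecting both rules (the function name is illustrative):

static int tiling_roundtrip_sketch(struct radeon_bo *bo, u32 flags, u32 pitch)
{
	u32 got_flags, got_pitch;
	int r;

	r = radeon_bo_set_tiling_flags(bo, flags, pitch); /* reserves internally */
	if (r)
		return r;
	r = radeon_bo_reserve(bo, false); /* getter requires the reservation */
	if (r)
		return r;
	radeon_bo_get_tiling_flags(bo, &got_flags, &got_pitch);
	radeon_bo_unreserve(bo);
	return 0;
}
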
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop)
{
- if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
if (force_drop) {
- radeon_object_clear_surface_reg(robj);
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
- if (robj->surface_reg >= 0)
- radeon_object_clear_surface_reg(robj);
+ if (bo->surface_reg >= 0)
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if ((robj->surface_reg >= 0) && !has_moved)
+ if ((bo->surface_reg >= 0) && !has_moved)
return 0;
- return radeon_object_get_surface_reg(robj);
+ return radeon_bo_get_surface_reg(bo);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 1);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 0);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb45..f6b69c2c0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type: ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ return RADEON_GEM_DOMAIN_VRAM;
+ case TTM_PL_TT:
+ return RADEON_GEM_DOMAIN_GTT;
+ case TTM_PL_SYSTEM:
+ return RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
+ return 0;
+}
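
This helper is the inverse direction of radeon_ttm_placement_from_domain() (not shown in this hunk): it recovers the GEM domain from wherever TTM currently placed the buffer. A minimal usage sketch:

/* sketch: recover the GEM domain from the BO's current TTM placement */
unsigned domain = radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
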
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
*/
-struct radeon_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
- struct ttm_bo_device bdev;
-};
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+ int r;
+
+retry:
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
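
Reservation becomes the central locking rule of this rework: nearly every BO operation (pin, kmap, tiling) must now happen between radeon_bo_reserve() and radeon_bo_unreserve(). A sketch of the pattern this patch applies throughout (ring buffer, IB pool, stolen VGA memory); the function name and GTT domain choice are illustrative:

static int pin_and_map_sketch(struct radeon_bo *bo, void **ptr)
{
	u64 gpu_addr;
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(bo);
		return r;
	}
	r = radeon_bo_kmap(bo, ptr);
	radeon_bo_unreserve(bo);
	return r;
}
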
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+ return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+ return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init and thus isn't protected
+ * by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait)
+{
+ int r;
+
+retry:
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ return r;
+ }
+ spin_lock(&bo->tbo.lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.lock);
+ ttm_bo_unreserve(&bo->tbo);
+ if (unlikely(r == -ERESTART))
+ goto retry;
+ return r;
+}
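
radeon_bo_wait() bundles the whole reserve/lock/wait/unreserve dance, so callers that only need to know whether a BO is idle (and where it currently lives) can use it directly. A sketch, with an illustrative function name:

static bool bo_is_idle_sketch(struct radeon_bo *bo)
{
	u32 mem_type;

	/* no_wait=true: returns -EBUSY instead of sleeping on the fence */
	return radeon_bo_wait(bo, &mem_type, true) == 0;
}
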
+extern int radeon_bo_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj, unsigned long size,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 46146c6a2a0..34b08d307c8 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
int radeon_pm_init(struct radeon_device *rdev)
{
if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for CP !\n");
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
}
return 0;
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
+ seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 29ab75903ec..6d0a009dd4a 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -887,6 +887,7 @@
# define RADEON_FP_PANEL_FORMAT (1 << 3)
# define RADEON_FP_EN_TMDS (1 << 7)
# define RADEON_FP_DETECT_SENSE (1 << 8)
+# define RADEON_FP_DETECT_INT_POL (1 << 9)
# define R200_FP_SOURCE_SEL_MASK (3 << 10)
# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
@@ -894,6 +895,7 @@
# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
# define RADEON_FP_SEL_CRTC1 (0 << 13)
# define RADEON_FP_SEL_CRTC2 (1 << 13)
+# define R300_HPD_SEL(x) ((x) << 13)
# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
# define RADEON_FP2_ON (1 << 2)
# define RADEON_FP2_PANEL_FORMAT (1 << 3)
# define RADEON_FP2_DETECT_SENSE (1 << 8)
+# define RADEON_FP2_DETECT_INT_POL (1 << 9)
# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
@@ -988,14 +991,20 @@
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
+# define RADEON_FP2_DETECT_MASK (1 << 10)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_FP_DETECT_STAT (1 << 4)
+# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_FP2_DETECT_STAT (1 << 10)
+# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1051,20 +1060,25 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1<<0)
-#define RADEON_I2C_NACK (1<<1)
-#define RADEON_I2C_HALT (1<<2)
-#define RADEON_I2C_SOFT_RST (1<<5)
-#define RADEON_I2C_DRIVE_EN (1<<6)
-#define RADEON_I2C_DRIVE_SEL (1<<7)
-#define RADEON_I2C_START (1<<8)
-#define RADEON_I2C_STOP (1<<9)
-#define RADEON_I2C_RECEIVE (1<<10)
-#define RADEON_I2C_ABORT (1<<11)
-#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_DONE (1 << 0)
+#define RADEON_I2C_NACK (1 << 1)
+#define RADEON_I2C_HALT (1 << 2)
+#define RADEON_I2C_SOFT_RST (1 << 5)
+#define RADEON_I2C_DRIVE_EN (1 << 6)
+#define RADEON_I2C_DRIVE_SEL (1 << 7)
+#define RADEON_I2C_START (1 << 8)
+#define RADEON_I2C_STOP (1 << 9)
+#define RADEON_I2C_RECEIVE (1 << 10)
+#define RADEON_I2C_ABORT (1 << 11)
+#define RADEON_I2C_GO (1 << 12)
+#define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_SEL (1<<16)
-#define RADEON_I2C_EN (1<<17)
+#define RADEON_I2C_DATA_COUNT_SHIFT 0
+#define RADEON_I2C_ADDR_COUNT_SHIFT 4
+#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+#define RADEON_I2C_SEL (1 << 16)
+#define RADEON_I2C_EN (1 << 17)
+#define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
@@ -1072,7 +1086,7 @@
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
-#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
+#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
-#define RADEON_LCD_GPIO_MASK 0x01a0
-#define RADEON_GPIOPAD_EN 0x01a0
-#define RADEON_LCD_GPIO_Y_REG 0x01a4
-#define RADEON_MDGPIO_A_REG 0x01ac
-#define RADEON_MDGPIO_EN_REG 0x01b0
-#define RADEON_MDGPIO_MASK 0x0198
+
#define RADEON_GPIOPAD_MASK 0x0198
#define RADEON_GPIOPAD_A 0x019c
-#define RADEON_MDGPIO_Y_REG 0x01b4
+#define RADEON_GPIOPAD_EN 0x01a0
+#define RADEON_GPIOPAD_Y 0x01a4
+#define RADEON_MDGPIO_MASK 0x01a8
+#define RADEON_MDGPIO_A 0x01ac
+#define RADEON_MDGPIO_EN 0x01b0
+#define RADEON_MDGPIO_Y 0x01b4
+
#define RADEON_MEM_ADDR_CONFIG 0x0148
#define RADEON_MEM_BASE 0x0f10 /* PCI */
#define RADEON_MEM_CNTL 0x0140
@@ -1360,6 +1375,9 @@
#define RADEON_OVR_CLR 0x0230
#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+#define RADEON_OVR2_CLR 0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
/* first capture unit */
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84..4d12b2d17b4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
- r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
- true, RADEON_GEM_DOMAIN_GTT,
- false, &rdev->ib_pool.robj);
+ r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
- r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
if (r) {
+ radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
- r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+ r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
+ int r;
+
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
- radeon_object_kunmap(rdev->ib_pool.robj);
- radeon_object_unref(&rdev->ib_pool.robj);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ib_pool.robj);
+ radeon_bo_unpin(rdev->ib_pool.robj);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
+ }
+ radeon_bo_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
}
mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false,
- &rdev->cp.ring_obj);
+ r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->cp.ring_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_object_kmap(rdev->cp.ring_obj,
+ r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
+ int r;
+
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj) {
- radeon_object_kunmap(rdev->cp.ring_obj);
- radeon_object_unpin(rdev->cp.ring_obj);
- radeon_object_unref(&rdev->cp.ring_obj);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->cp.ring_obj);
+ radeon_bo_unpin(rdev->cp.ring_obj);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ }
+ radeon_bo_unref(&rdev->cp.ring_obj);
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 38537d971a3..067167cb39c 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1950,7 +1950,7 @@ static void radeon_apply_surface_regs(int surf_index,
* Note that refcount can be at most 2, since during a free refcount=3
* might mean we have to allocate a new surface which might not always
* be available.
- * For example : we allocate three contigous surfaces ABC. If B is
+ * For example: we allocate three contiguous surfaces ABC. If B is
* freed, we suddenly need two surfaces to store A and C, which might
* not always be available.
*/
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d9a1c..391c973ec4d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
- struct radeon_object *vram_obj = NULL;
- struct radeon_object **gtt_obj = NULL;
+ struct radeon_bo *vram_obj = NULL;
+ struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
- false, &vram_obj);
+ r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+ &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
}
-
- r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+ r = radeon_bo_reserve(vram_obj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_cleanup;
}
-
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_object_create(rdev, NULL, size, true,
- RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+ r = radeon_bo_create(rdev, NULL, size, true,
+ RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+ r = radeon_bo_reserve(gtt_obj[i], false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object %d\n", i);
goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++)
*gtt_start = gtt_start;
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(vram_obj, &vram_map);
+ r = radeon_bo_kmap(vram_obj, &vram_map);
if (r) {
DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (GTT map 0x%p-0x%p)\n",
i, *vram_start, gtt_start, gtt_map,
gtt_end);
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
*vram_start = vram_start;
}
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object after copy %d\n", i);
goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (VRAM map 0x%p-0x%p)\n",
i, *gtt_start, vram_start, vram_map,
vram_end);
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
}
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
out_cleanup:
if (vram_obj) {
- radeon_object_unpin(vram_obj);
- radeon_object_unref(&vram_obj);
+ if (radeon_bo_is_reserved(vram_obj)) {
+ radeon_bo_unpin(vram_obj);
+ radeon_bo_unreserve(vram_obj);
+ }
+ radeon_bo_unref(&vram_obj);
}
if (gtt_obj) {
for (i = 0; i < n; i++) {
if (gtt_obj[i]) {
- radeon_object_unpin(gtt_obj[i]);
- radeon_object_unref(&gtt_obj[i]);
+ if (radeon_bo_is_reserved(gtt_obj[i])) {
+ radeon_bo_unpin(gtt_obj[i]);
+ radeon_bo_unreserve(gtt_obj[i]);
+ }
+ radeon_bo_unref(&gtt_obj[i]);
}
}
kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
-
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1381e06d6af..5a19d529d1c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.gtt_location;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.vram_location;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
- uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
-
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ break;
+ case TTM_PL_TT:
default:
- return (cur_placement & ~TTM_PL_MASK_CACHING) |
- TTM_PL_FLAG_SYSTEM |
- TTM_PL_FLAG_CACHED;
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
}
+ *placement = rbo->placement;
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- uint32_t proposed_placement;
+ u32 placements;
+ struct ttm_placement placement;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait);
if (unlikely(r)) {
return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- uint32_t proposed_flags;
+ struct ttm_placement placement;
+ u32 placements;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
- interruptible, no_wait);
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
if (unlikely(r)) {
return r;
}
@@ -378,7 +394,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
new_mem->mem_type == TTM_PL_SYSTEM) ||
(old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enought */
+ /* bind is enough */
radeon_move_null(bo, new_mem);
return 0;
}
@@ -407,18 +423,6 @@ memcpy:
return r;
}
-const uint32_t radeon_mem_prios[] = {
- TTM_PL_VRAM,
- TTM_PL_TT,
- TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
- TTM_PL_TT,
- TTM_PL_VRAM,
- TTM_PL_SYSTEM,
-};
-
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
}
static struct ttm_bo_driver radeon_bo_driver = {
- .mem_type_prio = radeon_mem_prios,
- .mem_busy_prio = radeon_busy_prios,
- .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
- .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
- ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+ rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_object_create(rdev, NULL, 256 * 1024, true,
- RADEON_GEM_DOMAIN_VRAM, false,
- &rdev->stollen_vga_memory);
+ r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ &rdev->stollen_vga_memory);
if (r) {
return r;
}
- r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r)
+ return r;
+ r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
- radeon_object_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
- ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+ rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
void radeon_ttm_fini(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->stollen_vga_memory) {
- radeon_object_unpin(rdev->stollen_vga_memory);
- radeon_object_unref(&rdev->stollen_vga_memory);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r == 0) {
+ radeon_bo_unpin(rdev->stollen_vga_memory);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
+ }
+ radeon_bo_unref(&rdev->stollen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ca037160a58..c1fcdddb6be 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
u32 tmp;
/* Setup GPU memory space */
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+ tmp = RREG32(R_00015C_NB_TOM);
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
rs400_gpu_init(rdev);
+ r100_enable_bm(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = rs400_gart_enable(rdev);
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs400_startup(rdev);
}
@@ -452,7 +454,7 @@ void rs400_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -490,10 +492,9 @@ int rs400_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Get vram informations */
@@ -510,7 +511,7 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f117cd8736..4f8ea426057 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,122 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+int rs600_mc_init(struct radeon_device *rdev)
+{
+ /* read back the MC value from the hw */
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+ rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xffffffffUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
+/* hpd (hot plug detect) for digital panel connect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+ if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+ if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = rs600_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ else
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ else
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+ rdev->irq.hpd[1] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ rs600_irq_set(rdev);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+ rdev->irq.hpd[1] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
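
The new rs600 HPD helpers give the connector code a uniform way to poll sense state and keep the interrupt polarity armed for the next transition. A hedged sketch of a detect path using them (the radeon_connector variable is assumed to come from the caller):

/* sketch: poll current sense state for this connector's HPD pin */
bool connected = rs600_hpd_sense(rdev, radeon_connector->hpd.hpd);
/* rs600_hpd_set_polarity() re-senses internally and arms the opposite edge */
rs600_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
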
+
/*
* GART.
*/
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
WREG32(R_00004C_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
- (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
- S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+ (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+ S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
for (i = 0; i < 19; i++) {
WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
- S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
- S_00016C_SYSTEM_ACCESS_MODE_MASK(
- V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
- S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
- V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
- S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
- S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
- S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+ S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+ S_00016C_SYSTEM_ACCESS_MODE_MASK(
+ V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+ S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+ V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+ S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+ S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+ S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
}
-
- /* System context map to GART space */
- WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
/* enable first context */
- WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
- S_000102_ENABLE_PAGE_TABLE(1) |
- S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+ S_000102_ENABLE_PAGE_TABLE(1) |
+ S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
/* disable all other contexts */
- for (i = 1; i < 8; i++) {
+ for (i = 1; i < 8; i++)
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
- }
/* setup the page table */
WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
- rdev->gart.table_addr);
+ rdev->gart.table_addr);
+ WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+ /* System context maps to VRAM space */
+ WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+ WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
/* enable page tables */
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (r == 0) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
uint32_t mode_int = 0;
+ u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+ ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+ ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.crtc_vblank_int[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
+ if (rdev->irq.hpd[0]) {
+ hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ }
+ if (rdev->irq.hpd[1]) {
+ hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
return 0;
}
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = ~C_000044_SW_INT;
+ u32 tmp;
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
} else {
*r500_disp_int = 0;
}
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev)
{
uint32_t status, msi_rearm;
uint32_t r500_disp_int;
+ bool queue_hotplug = false;
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev)
drm_handle_vblank(rdev->ddev, 0);
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
drm_handle_vblank(rdev->ddev, 1);
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev);
- /* FIXME: is this correct ? */
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
void rs600_vram_info(struct radeon_device *rdev)
{
- /* FIXME: to do or is these values sane ? */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
+
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs600_startup(rdev);
}
@@ -445,7 +610,7 @@ void rs600_fini(struct radeon_device *rdev)
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -482,10 +647,9 @@ int rs600_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -493,7 +657,7 @@ int rs600_init(struct radeon_device *rdev)
/* Get vram informations */
rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs600_mc_init(rdev);
if (r)
return r;
rs600_debugfs(rdev);
@@ -505,7 +669,7 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index 81308924859..c1c8f5885cb 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -30,27 +30,12 @@
/* Registers */
#define R_000040_GEN_INT_CNTL 0x000040
-#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
-#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
-#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
-#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
-#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
-#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
-#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
-#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
-#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
-#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
-#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
-#define C_000040_SNAPSHOT2 0xFFFFFF7F
-#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
-#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
-#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
-#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
-#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
-#define C_000040_FP2_DETECT 0xFFFFFBFF
-#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
-#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
-#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
+#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
+#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
+#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
+#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
@@ -370,7 +355,90 @@
#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
-
+#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
+#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
+#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
+#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
+#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
+#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
+#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
+#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
+#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
+#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
+#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
+#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
+#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
+#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
+#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
+#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
+#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
+#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
+#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
+#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
/* MC registers */
#define R_000000_MC_STATUS 0x000000
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 27547175cf9..1e22f52d603 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
void rs690_vram_info(struct radeon_device *rdev)
{
- uint32_t tmp;
fixed20_12 a;
rs400_gart_adjust_size(rdev);
- /* DDR for all card after R300 & IGP */
+
rdev->mc.vram_is_ddr = true;
- /* FIXME: is this correct for RS690/RS740 ? */
- tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
- }
+ rdev->mc.vram_width = 128;
+
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
+
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}
+static int rs690_mc_init(struct radeon_device *rdev)
+{
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
- request_fifo_depth.full = rfixed_div(a, c);
+ a.full = rfixed_div(b, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -374,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
+ wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -605,7 +624,6 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs690_startup(rdev);
}
@@ -662,7 +682,7 @@ void rs690_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -700,10 +720,9 @@ int rs690_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -711,7 +730,7 @@ int rs690_init(struct radeon_device *rdev)
/* Get vram informations */
rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs690_mc_init(rdev);
if (r)
return r;
rv515_debugfs(rdev);
@@ -723,7 +742,7 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ba68c9fe90a..59632a506b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rv515_startup(rdev);
}
@@ -540,11 +541,11 @@ void rv515_fini(struct radeon_device *rdev)
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
- rv370_pcie_gart_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
- request_fifo_depth.full = rfixed_div(a, c);
+ a.full = rfixed_div(b, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -995,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
estimated_width.full = rfixed_div(estimated_width, consumption_time);
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ wm->priority_mark.full = wm->priority_mark_max.full;
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
+ wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b0efd0ddae7..fbb0357f1ec 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -829,11 +833,11 @@ int rv770_mc_init(struct radeon_device *rdev)
* AGP so that GPU can catch out of VRAM/AGP access
*/
if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enought place before */
+ /* Enough space before */
rdev->mc.vram_location = rdev->mc.gtt_location -
rdev->mc.mc_vram_size;
} else if (tmp > rdev->mc.mc_vram_size) {
- /* Enought place after */
+ /* Enough space after */
rdev->mc.vram_location = rdev->mc.gtt_location +
rdev->mc.gtt_size;
} else {
@@ -870,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
@@ -880,13 +892,26 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
@@ -934,13 +959,19 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
return 0;
}
@@ -975,7 +1006,11 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -998,31 +1033,31 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_irq_kms_init(rdev);
if (r)
return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
r = r600_pcie_gart_init(rdev);
if (r)
return r;
- rdev->accel_working = true;
r = r600_blit_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled blitter (%d).\n", r);
- rdev->accel_working = false;
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
}
+ rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
rv770_suspend(rdev);
@@ -1034,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
@@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev)
rv770_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c..1e138f5bae0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d46..1fbb2eea5e8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, so to
+ * make sure things don't go berserk you must access this pointer while
+ * holding the global lru lock, and make sure that anytime you free a
+ * node you reset the pointer to NULL.
+ */
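A minimal sketch of the protocol this note describes, matching the pattern used throughout the rest of this patch (the helper name is hypothetical; drm_mm_node.private and glob->lru_lock are the fields actually used below):

	/* Hypothetical helper: free a node while honoring the note above. */
	static void example_put_node(struct ttm_bo_global *glob,
				     struct ttm_mem_reg *mem)
	{
		spin_lock(&glob->lru_lock);
		if (mem->mm_node) {
			mem->mm_node->private = NULL; /* drop bo back-pointer */
			drm_mm_put_block(mem->mm_node);
			mem->mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);
	}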
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO
};
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
+ struct ttm_mem_type_manager *man)
+{
+ printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
+ printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
+ printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
+ printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
+ printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
+ printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
+ printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
+ man->available_caching);
+ printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
+ man->default_caching);
+ spin_lock(&glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&glob->lru_lock);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_mem_type_manager *man;
+ int i, ret, mem_type;
+
+ printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ man = &bdev->man[mem_type];
+ printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
+ ttm_mem_type_manager_debug(glob, man);
+ }
+}
+
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
@@ -117,12 +179,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
ret = wait_event_interruptible(bo->event_queue,
atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
}
return 0;
}
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
@@ -247,7 +310,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/*
* Call bo->mutex locked.
*/
-
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +337,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_USER,
glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
+ if (unlikely(bo->ttm == NULL)) {
ret = -ENOMEM;
- break;
+ break;
+ }
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
@@ -328,14 +391,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
- struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
-
- *old_mem = *mem;
+ bo->mem = *mem;
mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, mem->placement,
- TTM_PL_MASK_MEMTYPE);
goto moved;
}
@@ -418,6 +475,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
}
if (bo->mem.mm_node) {
+ bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
@@ -554,24 +612,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait)
{
- int ret = 0;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
- uint32_t proposed_placement;
-
- if (bo->mem.mem_type != mem_type)
- goto out;
+ struct ttm_placement placement;
+ int ret = 0;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- if (ret != -ERESTART) {
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to expire sync object before "
"buffer eviction.\n");
@@ -584,116 +639,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- proposed_placement = bdev->driver->evict_flags(bo);
-
- ret = ttm_bo_mem_space(bo, proposed_placement,
- &evict_mem, interruptible, no_wait);
- if (unlikely(ret != 0 && ret != -ERESTART))
- ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
- &evict_mem, interruptible, no_wait);
-
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to find memory space for "
"buffer 0x%p eviction.\n", bo);
+ ttm_bo_mem_space_debug(bo, &placement);
+ }
goto out;
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ spin_lock(&glob->lru_lock);
+ if (evict_mem.mm_node) {
+ evict_mem.mm_node->private = NULL;
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&glob->lru_lock);
goto out;
}
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_buffer_object *bo;
+ int ret, put_count = 0;
spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
+ bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+ kref_get(&bo->list_kref);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- bo->evicted = true;
-out:
+ if (unlikely(ret != 0))
+ return ret;
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ttm_bo_unreserve(bo);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node **node)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ *node = NULL;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&glob->lru_lock);
+ *node = drm_mm_search_free_in_range(&man->manager,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(*node == NULL)) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&glob->lru_lock);
+ } while (*node == NULL);
+ return 0;
+}
+
/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- uint32_t mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
- struct drm_mm_node *node;
- struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct list_head *lru;
- unsigned long num_pages = mem->num_pages;
- int put_count = 0;
+ struct drm_mm_node *node;
int ret;
-retry_pre_get:
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret != 0))
- return ret;
-
- spin_lock(&glob->lru_lock);
do {
- node = drm_mm_search_free(&man->manager, num_pages,
- mem->page_alignment, 1);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ if (unlikely(ret != 0))
+ return ret;
if (node)
break;
-
- lru = &man->lru;
- if (list_empty(lru))
+ spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
break;
-
- entry = list_first_entry(lru, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
-
- ret =
- ttm_bo_reserve_locked(entry, interruptible, no_wait,
- false, 0);
-
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(entry);
-
+ }
spin_unlock(&glob->lru_lock);
-
+ ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+ no_wait);
if (unlikely(ret != 0))
return ret;
-
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
- ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
- ttm_bo_unreserve(entry);
-
- kref_put(&entry->list_kref, ttm_bo_release_list);
- if (ret)
- return ret;
-
- spin_lock(&glob->lru_lock);
} while (1);
-
- if (!node) {
- spin_unlock(&glob->lru_lock);
+ if (node == NULL)
return -ENOMEM;
- }
-
- node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- goto retry_pre_get;
- }
-
- spin_unlock(&glob->lru_lock);
mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
@@ -724,7 +802,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result;
}
-
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed,
uint32_t mem_type,
@@ -757,66 +834,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
-
- uint32_t num_prios = bdev->driver->num_mem_type_prio;
- const uint32_t *prios = bdev->driver->mem_type_prio;
- uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
- bool has_eagain = false;
+ bool has_erestartsys = false;
struct drm_mm_node *node = NULL;
- int ret;
+ int i, ret;
mem->mm_node = NULL;
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i <= placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type, proposed_placement,
- &cur_flags);
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
if (!type_ok)
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags and apply them to the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
if (man->has_type && man->use_type) {
type_found = true;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&glob->lru_lock);
- node = drm_mm_search_free(&man->manager,
- mem->num_pages,
- mem->page_alignment,
- 1);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- break;
- }
- node = drm_mm_get_block_atomic(node,
- mem->num_pages,
- mem->
- page_alignment);
- spin_unlock(&glob->lru_lock);
- } while (!node);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem,
+ &node);
+ if (unlikely(ret))
+ return ret;
}
if (node)
break;
@@ -826,67 +892,66 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
+ if (node)
+ node->private = bo;
return 0;
}
if (!type_found)
return -EINVAL;
- num_prios = bdev->driver->num_mem_busy_prio;
- prios = bdev->driver->mem_busy_prio;
-
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i <= placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
-
if (!man->has_type)
continue;
-
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type,
- proposed_placement, &cur_flags))
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags and apply them to the current flags.
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
- ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
- interruptible, no_wait);
-
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
+ mem->mm_node->private = bo;
return 0;
}
-
- if (ret == -ERESTART)
- has_eagain = true;
+ if (ret == -ERESTARTSYS)
+ has_erestartsys = true;
}
-
- ret = (has_eagain) ? -ERESTART : -ENOMEM;
+ ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
- int ret = 0;
-
if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
return -EBUSY;
- ret = wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-
- if (ret == -ERESTARTSYS)
- ret = -ERESTART;
-
- return ret;
+ return wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->cpu_writers) == 0);
}
+EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -899,147 +964,132 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
-
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
-
if (ret)
return ret;
-
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
-
/*
* Determine where to move the buffer.
*/
-
- ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
- interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
if (ret)
goto out_unlock;
-
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
+ mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
return ret;
}
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
- return 0;
- if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
- return 0;
-
- return 1;
+ int i;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
}
-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
int ret;
BUG_ON(!atomic_read(&bo->reserved));
- bo->proposed_placement = proposed_placement;
-
- TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
- (unsigned long)proposed_placement,
- (unsigned long)bo->mem.placement);
-
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
/*
* Check whether we need to move buffer.
*/
-
- if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
- ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
- interruptible, no_wait);
- if (ret) {
- if (ret != -ERESTART)
- printk(KERN_ERR TTM_PFX
- "Failed moving buffer. "
- "Proposed placement 0x%08x\n",
- bo->proposed_placement);
- if (ret == -ENOMEM)
- printk(KERN_ERR TTM_PFX
- "Out of aperture space or "
- "DRM memory quota.\n");
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ if (ret)
return ret;
- }
+ } else {
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
}
-
/*
* We might need to add a TTM.
*/
-
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true);
if (ret)
return ret;
}
- /*
- * Validation has succeeded, move the access and other
- * non-mapping-related flag bits from the proposed flags to
- * the active flags
- */
-
- ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
- ~TTM_PL_MASK_MEMTYPE);
-
return 0;
}
-EXPORT_SYMBOL(ttm_buffer_object_validate);
+EXPORT_SYMBOL(ttm_bo_validate);
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
- uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
- uint32_t new_mask = set_flags | clr_flags;
-
- if ((bo->type == ttm_bo_type_user) &&
- (clr_flags & TTM_PL_FLAG_CACHED)) {
- printk(KERN_ERR TTM_PFX
- "User buffers require cache-coherent memory.\n");
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN)) {
- if (new_mask & TTM_PL_FLAG_NO_EVICT) {
- printk(KERN_ERR TTM_PFX "Need to be root to modify"
- " NO_EVICT status.\n");
+ int i;
+
+ if (placement->fpfn || placement->lpfn) {
+ if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+ printk(KERN_ERR TTM_PFX "Page number range to small "
+ "Need %lu pages, range is [%u, %u]\n",
+ bo->mem.num_pages, placement->fpfn,
+ placement->lpfn);
return -EINVAL;
}
-
- if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
- (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- printk(KERN_ERR TTM_PFX
- "Incompatible memory specification"
- " for NO_EVICT buffer.\n");
- return -EINVAL;
+ }
+ for (i = 0; i < placement->num_placement; i++) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+ printk(KERN_ERR TTM_PFX "Need to be root to "
+ "modify NO_EVICT status.\n");
+ return -EINVAL;
+ }
+ }
+ }
+ for (i = 0; i < placement->num_busy_placement; i++) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+ printk(KERN_ERR TTM_PFX "Need to be root to "
+ "modify NO_EVICT status.\n");
+ return -EINVAL;
+ }
}
}
return 0;
}
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- uint32_t flags,
- uint32_t page_alignment,
- unsigned long buffer_start,
- bool interruptible,
- struct file *persistant_swap_storage,
- size_t acc_size,
- void (*destroy) (struct ttm_buffer_object *))
+int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ size_t acc_size,
+ void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
unsigned long num_pages;
@@ -1077,29 +1127,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
bo->acc_size = acc_size;
atomic_inc(&bo->glob->bo_count);
- ret = ttm_bo_check_placement(bo, flags, 0ULL);
+ ret = ttm_bo_check_placement(bo, placement);
if (unlikely(ret != 0))
goto out_err;
/*
- * If no caching attributes are set, accept any form of caching.
- */
-
- if ((flags & TTM_PL_MASK_CACHING) == 0)
- flags |= TTM_PL_MASK_CACHING;
-
- /*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
-
if (bo->type == ttm_bo_type_device) {
ret = ttm_bo_setup_vm(bo);
if (ret)
goto out_err;
}
- ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (ret)
goto out_err;
@@ -1112,7 +1154,7 @@ out_err:
return ret;
}
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
unsigned long num_pages)
@@ -1123,19 +1165,19 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
return glob->ttm_bo_size + 2 * page_array_size;
}
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- uint32_t flags,
- uint32_t page_alignment,
- unsigned long buffer_start,
- bool interruptible,
- struct file *persistant_swap_storage,
- struct ttm_buffer_object **p_bo)
+int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ int ret;
size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1150,76 +1192,41 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
- page_alignment, buffer_start,
- interruptible,
- persistant_swap_storage, acc_size, NULL);
+ ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+ buffer_start, interruptible,
+ persistant_swap_storage, acc_size, NULL);
if (likely(ret == 0))
*p_bo = bo;
return ret;
}
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
- uint32_t mem_type, bool allow_errors)
-{
- int ret;
-
- spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
-
- if (ret && allow_errors)
- goto out;
-
- if (bo->mem.mem_type == mem_type)
- ret = ttm_bo_evict(bo, mem_type, false, false);
-
- if (ret) {
- if (allow_errors) {
- goto out;
- } else {
- ret = 0;
- printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
- }
- }
-
-out:
- return ret;
-}
-
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- struct list_head *head,
- unsigned mem_type, bool allow_errors)
+ unsigned mem_type, bool allow_errors)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry;
int ret;
- int put_count;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
-
- while (!list_empty(head)) {
- entry = list_first_entry(head, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
- ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
- put_count = ttm_bo_del_from_lru(entry);
+ while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
- BUG_ON(ret);
- ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
- ttm_bo_unreserve(entry);
- kref_put(&entry->list_kref, ttm_bo_release_list);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printk(KERN_ERR TTM_PFX
+ "Cleanup eviction failed\n");
+ }
+ }
spin_lock(&glob->lru_lock);
}
-
spin_unlock(&glob->lru_lock);
-
return 0;
}
@@ -1246,7 +1253,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+ ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager))
@@ -1279,12 +1286,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size)
+ unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
@@ -1314,7 +1321,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_mm_init(&man->manager, 0, p_size);
if (ret)
return ret;
}
@@ -1463,7 +1470,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0))
goto out_no_sys;
@@ -1693,7 +1700,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
ret = wait_event_interruptible
(bo->event_queue, atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue,
atomic_read(&bo->reserved) == 0);
@@ -1722,12 +1729,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ttm_bo_unreserve(bo);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
if (atomic_dec_and_test(&bo->cpu_writers))
wake_up_all(&bo->event_queue);
}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
/**
* A buffer object shrink method that tries to swap out the first
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c70927ecda2..2ecf7d0c64f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return tmp;
}
+EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long bus_base,
@@ -427,7 +428,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
/*
* We need to use vmap to get the desired page protection
- * or to make the buffer object look contigous.
+ * or to make the buffer object look contiguous.
*/
prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
PAGE_KERNEL :
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d04033..609a85a4d85 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_wait(bo, false, true, false);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- retval = (ret != -ERESTART) ?
+ retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
switch (ret) {
case 0:
break;
- case -ERESTART:
- ret = -EINTR;
- goto out_unref;
case -EBUSY:
ret = -EAGAIN;
goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
switch (ret) {
case 0:
break;
- case -ERESTART:
- return -EINTR;
case -EBUSY:
return -EAGAIN;
default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 00000000000..c285c2902d1
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ entry->reserved = false;
+ ttm_bo_unreserve(bo);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+retry:
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ entry->reserved = false;
+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(list);
+ if (ret == -EAGAIN) {
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ } else
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation(list);
+ ret = ttm_bo_wait_cpu(bo, false);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ void *old_sync_obj;
+
+ spin_lock(&bo->lock);
+ old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = entry->new_sync_obj_arg;
+ spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
+ entry->reserved = false;
+ if (old_sync_obj)
+ driver->sync_obj_unref(&old_sync_obj);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
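Taken together, the three helpers above bracket a validation pass along these lines (a driver-side sketch; my_validate_bo() and my_emit_fence() are hypothetical placeholders for driver-specific work, not part of this patch):

	static int example_submit(struct list_head *val_list, uint32_t val_seq)
	{
		struct ttm_validate_buffer *entry;
		void *fence;
		int ret;

		/* Reserve everything, backing off on contention as described
		 * in the comment above ttm_eu_reserve_buffers(). */
		ret = ttm_eu_reserve_buffers(val_list, val_seq);
		if (ret)
			return ret;

		list_for_each_entry(entry, val_list, head) {
			ret = my_validate_bo(entry->bo); /* driver-specific */
			if (ret) {
				ttm_eu_backoff_reservation(val_list);
				return ret;
			}
		}

		fence = my_emit_fence(); /* driver-specific sync object */
		ttm_eu_fence_buffer_objects(val_list, fence);
		return 0;
	}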
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 00000000000..f619ebcaa4e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_module.h"
+#include <asm/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ spin_lock_init(&lock->lock);
+ init_waitqueue_head(&lock->queue);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_read_lock(lock));
+ else
+ wait_event(lock->queue, __ttm_read_lock(lock));
+ return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+ spin_unlock(&lock->lock);
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+ bool locked;
+
+ if (interruptible)
+ ret = wait_event_interruptible
+ (lock->queue, __ttm_read_trylock(lock, &locked));
+ else
+ wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+ if (unlikely(ret != 0)) {
+ BUG_ON(locked);
+ return ret;
+ }
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 0;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_write_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ }
+ } else
+ wait_event(lock->queue, __ttm_write_lock(lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 1;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ spin_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ printk(KERN_INFO TTM_PFX "vt unlock.\n");
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_vt_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ return ret;
+ }
+ } else
+ wait_event(lock->queue, __ttm_vt_lock(lock));
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else {
+ lock->vt_holder = tfile;
+ printk(KERN_INFO TTM_PFX "vt lock.\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
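The lock above multiplexes a read/write count, pending-writer flags and the VT/suspend states over a single spinlock and waitqueue. A typical read-side caller, assuming the usual split where command submission takes the read lock and exclusive operations take the write lock (the call site is illustrative):

	static int example_submit_locked(struct ttm_lock *lock)
	{
		int ret = ttm_read_lock(lock, true); /* interruptible */
		if (ret)
			return ret; /* -ERESTARTSYS if a signal arrived */
		/* ... touch shared state, submit commands ... */
		ttm_read_unlock(lock);
		return 0;
	}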
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb..f5245c02b8f 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
- if (unlikely(!zone))
- return -ENOMEM;
-
if (si->totalhigh == 0)
return 0;
+ zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ if (unlikely(!zone))
+ return -ENOMEM;
+
mem = si->totalram;
mem *= si->mem_unit;
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
* No special dma32 zone needed.
*/
- if (mem <= ((uint64_t) 1ULL << 32))
+ if (mem <= ((uint64_t) 1ULL << 32)) {
+ kfree(zone);
return 0;
+ }
/*
* Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
+EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
+EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
}
return 0;
}
+EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 00000000000..1099abac824
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ rwlock_t lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ rwlock_t object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ struct kref kref;
+ struct ttm_base_object *obj;
+ enum ttm_ref_type ref_type;
+ struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ kref_get(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+ struct ttm_object_file *tfile =
+ container_of(kref, struct ttm_object_file, refcount);
+
+ kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = refcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+ write_lock(&tdev->object_lock);
+ kref_init(&base->refcount);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ write_unlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+out_err0:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
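+
+/*
+ * Example usage (an illustrative sketch, not part of this file): a
+ * driver would typically embed struct ttm_base_object in its own
+ * object type. The names my_obj and my_obj_release, and the use of
+ * ttm_driver_type0 as the object type, are hypothetical:
+ *
+ *	struct my_obj {
+ *		struct ttm_base_object base;
+ *		...
+ *	};
+ *
+ *	static void my_obj_release(struct ttm_base_object **p_base)
+ *	{
+ *		struct my_obj *obj =
+ *			container_of(*p_base, struct my_obj, base);
+ *
+ *		*p_base = NULL;
+ *		kfree(obj);
+ *	}
+ *
+ *	ret = ttm_base_object_init(tfile, &obj->base, true,
+ *				   ttm_driver_type0, &my_obj_release,
+ *				   NULL);
+ *
+ * On success the initial reference has been handed over to a
+ * TTM_REF_USAGE ref object owned by tfile, so the object stays alive
+ * until that ref is dropped or the file is released.
+ */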
+
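+/*
+ * Called with tdev->object_lock held by kref_put() in
+ * ttm_base_object_unref(). The lock is dropped across the driver's
+ * refcount_release callback, which may sleep, and re-acquired before
+ * returning so that the caller can unlock it.
+ */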
+static void ttm_release_base(struct kref *kref)
+{
+ struct ttm_base_object *base =
+ container_of(kref, struct ttm_base_object, refcount);
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ write_unlock(&tdev->object_lock);
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ write_lock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ write_lock(&tdev->object_lock);
+	(void)kref_put(&base->refcount, ttm_release_base);
+ write_unlock(&tdev->object_lock);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ read_lock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ kref_get(&base->refcount);
+ }
+ read_unlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printk(KERN_ERR TTM_PFX
+ "Attempted access of non-shareable object.\n");
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
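+
+/*
+ * Example usage (sketch): resolving a user-space handle to a base
+ * object; "handle" is a hypothetical 32-bit key handed out to user
+ * space earlier (base->hash.key):
+ *
+ *	struct ttm_base_object *base;
+ *
+ *	base = ttm_base_object_lookup(tfile, handle);
+ *	if (unlikely(base == NULL))
+ *		return -EINVAL;
+ *
+ *	... use the object ...
+ *
+ *	ttm_base_object_unref(&base);
+ */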
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
+ while (ret == -EINVAL) {
+ read_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_get(&ref->kref);
+ read_unlock(&tfile->lock);
+ break;
+ }
+
+ read_unlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ kref_init(&ref->kref);
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (likely(ret == 0)) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ kref_get(&base->refcount);
+ write_unlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ write_unlock(&tfile->lock);
+ BUG_ON(ret != -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
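+
+/*
+ * Example usage (sketch): ref objects are refcounted per (file, base
+ * object, ref type) triple, so adding the same ref twice only bumps
+ * the existing ref object's count:
+ *
+ *	bool existed;
+ *
+ *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
+ *	if (likely(ret == 0) && existed) {
+ *		... tfile already held a TTM_REF_USAGE ref on base ...
+ *	}
+ *
+ * The ref is dropped again with ttm_ref_object_base_unref(), or
+ * automatically when the file is released.
+ */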
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+ struct ttm_ref_object *ref =
+ container_of(kref, struct ttm_ref_object, kref);
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
+ write_unlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ write_unlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_put(&ref->kref, ttm_ref_object_release);
+ write_unlock(&tfile->lock);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ write_lock(&tfile->lock);
+
+	/*
+	 * ttm_ref_object_release() drops and re-takes tfile->lock,
+	 * so we restart from the list head each time around.
+	 */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(&ref->kref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ write_unlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ if (unlikely(tfile == NULL))
+ return NULL;
+
+ rwlock_init(&tfile->lock);
+ tfile->tdev = tdev;
+ kref_init(&tfile->refcount);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ kfree(tfile);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
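+
+/*
+ * Example usage (sketch): per-client setup and teardown, as a driver
+ * might do from its open and postclose hooks; the hash order 10 is
+ * an arbitrary illustrative value:
+ *
+ *	priv->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ *	if (unlikely(priv->tfile == NULL))
+ *		return -ENOMEM;
+ *	...
+ *	ttm_object_file_release(&priv->tfile);
+ */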
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(tdev == NULL))
+ return NULL;
+
+ tdev->mem_glob = mem_glob;
+ rwlock_init(&tdev->object_lock);
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (likely(ret == 0))
+ return tdev;
+
+ kfree(tdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
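+
+/*
+ * Example usage (sketch): device-wide setup at driver load time,
+ * mirrored by ttm_object_device_release() at unload:
+ *
+ *	dev_priv->tdev = ttm_object_device_init(mem_glob, 12);
+ *	if (unlikely(dev_priv->tdev == NULL))
+ *		return -ENOMEM;
+ */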
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ write_lock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ write_unlock(&tdev->object_lock);
+
+ kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce..9c2b1cc5dba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
ttm->state = tt_unbound;
return 0;
}
+EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,