Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 10
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 29
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 66
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 13
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 89
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 7
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 90
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 8
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_mode.c | 5
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 272
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 139
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 390
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 46
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 732
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 116
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 94
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_ioc32.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 826
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 264
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 105
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 79
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 161
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h | 133
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 161
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 219
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 216
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 161
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 225
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 121
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 306
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 678
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 30
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_ioc32.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 7
-rw-r--r--  drivers/gpu/drm/radeon/ObjectID.h | 801
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 121
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 199
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 259
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 214
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 266
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 506
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 74
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 166
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 76
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 213
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 82
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r200 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 795
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 68
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 90
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 152
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 35
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h | 1793
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_escape.h | 89
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_overlay.h | 201
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h | 1346
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_types.h | 45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 252
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 783
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 521
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 716
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 737
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 538
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 213
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 87
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 286
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 880
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 102
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 516
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 625
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1187
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 99
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
195 files changed, 20011 insertions(+), 3972 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd17e05..305c5900396 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
If M is selected, the module will be called radeon.
+source "drivers/gpu/drm/radeon/Kconfig"
+
config DRM_I810
tristate "Intel I810"
depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 470ef6779db..39c5aa75b8f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3e9b8..17be051b7aa 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE,
- gart_info->table_mask);
+ PAGE_SIZE);
if (gart_info->table_handle == NULL)
return -ENOMEM;
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+ if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+ DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+ (unsigned long long)gart_info->table_mask);
+ ret = 1;
+ goto done;
+ }
+
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e304f6f..8417cc4c43f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+ dmah = drm_pci_alloc(dev, map->size, map->size);
if (!dmah) {
kfree(map);
return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
while (entry->buf_count < count) {
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5124401f266..d91fb8c0b7b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4231d6db72e..7d0f00a935f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_helper_crtc_in_use);
/**
- * drm_disable_unused_functions - disable unused objects
+ * drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* LOCKING:
@@ -702,7 +702,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
+ DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
mode->name, mode->base.id);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -1032,7 +1032,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
/*
* we shouldn't end up with no modes here.
*/
- WARN(!count, "No connectors reported connected with modes\n");
+ if (count == 0)
+ printk(KERN_INFO "No connectors reported connected with modes\n");
drm_setup_crtcs(dev);
@@ -1162,6 +1163,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
int ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1178,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if(encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ff2f1042cb4..766c46875a2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -434,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
* Looks up the ioctl function in the ::ioctls table, checking for root
* privileges if so required, and dispatches to the respective function.
*/
-int drm_ioctl(struct inode *inode, struct file *filp,
+long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
+ struct drm_device *dev;
struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -446,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
char stack_kdata[128];
char *kdata = NULL;
+ dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
@@ -501,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
goto err_i1;
}
}
- retcode = func(dev, kdata, file_priv);
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ lock_kernel();
+ retcode = func(dev, kdata, file_priv);
+ unlock_kernel();
+ }
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
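The hunk above is the core of the BKL push-down: drm_ioctl() loses the inode argument, becomes an unlocked_ioctl handler, and takes the big kernel lock itself only for entries not flagged DRM_UNLOCKED. A minimal sketch of how a driver's fops are wired after this change, mirroring the i810/i830/i915 hunks later in this diff:

static const struct file_operations example_drm_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,    /* was: .ioctl = drm_ioctl */
        .mmap           = drm_mmap,
        .fasync         = drm_fasync,
};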
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c39b26f1abe..ab6c9733041 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+ static const int n_sizes =
+ sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < n_sizes; i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@@ -633,8 +677,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
- return NULL;
+ printk(KERN_WARNING "composite sync not supported\n");
}
/* it is incorrect if hsync/vsync width is zero */
@@ -681,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_set_name(mode);
- if (pt->misc & DRM_EDID_PT_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
@@ -911,23 +953,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct cvt_timing *cvt;
const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
- int width, height;
+ int uninitialized_var(width), height;
cvt = &(timing->data.other_data.data.cvt[i]);
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
- switch (cvt->code[1] & 0xc0) {
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
case 0x00:
width = height * 4 / 3;
break;
- case 0x40:
+ case 0x04:
width = height * 16 / 9;
break;
- case 0x80:
+ case 0x08:
width = height * 16 / 10;
break;
- case 0xc0:
+ case 0x0c:
width = height * 15 / 9;
break;
}
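The CVT hunk above fixes two decode bugs: the vertical-size high bits in the top nibble of code[1] must be shifted left by 4 (not 8), and the aspect-ratio field lives in bits 3:2 (mask 0x0c), not bits 7:6. A standalone userspace sketch of the corrected decode, with a hypothetical sample code for a 1920x1080 timing:

#include <stdio.h>

static void cvt_decode(const unsigned char code[3])
{
        /* vertical size: code[0] plus the high nibble of code[1] << 4 */
        int width, height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;

        switch (code[1] & 0x0c) {       /* aspect ratio, bits 3:2 */
        case 0x00: width = height * 4 / 3;   break;
        case 0x04: width = height * 16 / 9;  break;
        case 0x08: width = height * 16 / 10; break;
        default:   width = height * 15 / 9;  break;
        }
        printf("%dx%d\n", width, height);
}

int main(void)
{
        const unsigned char code[3] = { 0x1b, 0x24, 0x00 }; /* hypothetical */
        cvt_decode(code);       /* prints 1920x1080 */
        return 0;
}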
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa055f4..0f9e90552dc 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
force = DRM_FORCE_ON;
break;
case 'D':
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
else
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
@@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
/* Need to resize the fb object !!! */
- if (var->xres > fb->width || var->yres > fb->height) {
- DRM_ERROR("Requested width/height is greater than current fb "
- "object %dx%d > %dx%d\n", var->xres, var->yres,
- fb->width, fb->height);
- DRM_ERROR("Need resizing code.\n");
+ if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+ fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
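The check_var hunk above tightens validation: a mode that needs more width, height, or bits per pixel than the existing framebuffer object is rejected, since the helper has no resize path. A sketch of the test in isolation, assuming the usual fb_var_screeninfo and drm_framebuffer fields:

static int example_var_fits(const struct fb_var_screeninfo *var,
                            const struct drm_framebuffer *fb)
{
        if (var->bits_per_pixel > fb->bits_per_pixel ||
            var->xres > fb->width || var->yres > fb->height)
                return -EINVAL; /* would need a bigger backing object */
        return 0;
}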
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c46..8bf3770f294 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
if (IS_ERR(obj->filp))
goto free;
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
-
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 282d9fdf9f4..d61d185cf04 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
&version->desc))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
+ err = drm_ioctl(file,
DRM_IOCTL_VERSION, (unsigned long)version);
if (err)
return err;
@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
&u->unique))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+ err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
if (err)
return err;
@@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
&u->unique))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+ return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
}
typedef struct drm_map32 {
@@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
if (__put_user(idx, &map->offset))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_MAP, (unsigned long)map);
+ err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
if (err)
return err;
@@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
|| __put_user(m32.flags, &map->flags))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_ADD_MAP, (unsigned long)map);
+ err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
if (err)
return err;
@@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
if (__put_user((void *)(unsigned long)handle, &map->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RM_MAP, (unsigned long)map);
+ return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
}
typedef struct drm_client32 {
@@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (__put_user(idx, &client->idx))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+ err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
if (err)
return err;
@@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_STATS, (unsigned long)stats);
+ err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
if (err)
return err;
@@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
|| __put_user(agp_start, &buf->agp_start))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+ err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
if (err)
return err;
@@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
|| __put_user(b32.high_mark, &buf->high_mark))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+ return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
}
typedef struct drm_buf_info32 {
@@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
|| __put_user(list, &request->list))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
if (err)
return err;
@@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
|| __put_user(list, &request->list))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
if (err)
return err;
@@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
&request->list))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
}
typedef struct drm_ctx_priv_map32 {
@@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
&request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
@@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
if (__put_user(ctx_id, &request->ctx_id))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
if (err)
return err;
@@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
&res->contexts))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RES_CTX, (unsigned long)res);
+ err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
if (err)
return err;
@@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
&d->request_sizes))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_DMA, (unsigned long)d);
+ err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
if (err)
return err;
@@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
if (put_user(m32.mode, &mode->mode))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+ return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
}
typedef struct drm_agp_info32 {
@@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_INFO, (unsigned long)info);
+ err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
if (err)
return err;
@@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
|| __put_user(req32.type, &request->type))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
if (err)
return err;
if (__get_user(req32.handle, &request->handle)
|| __get_user(req32.physical, &request->physical)
|| copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
return -EFAULT;
}
@@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
|| __put_user(handle, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
}
typedef struct drm_agp_binding32 {
@@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
|| __put_user(req32.offset, &request->offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_BIND, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
@@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
|| __put_user(handle, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
}
#endif /* __OS_HAS_AGP */
@@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
|| __put_user(x, &request->size))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
if (err)
return err;
@@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
|| __put_user(x << PAGE_SHIFT, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SG_FREE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
}
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
@@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
__put_user(update32.data, &request->data))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
return err;
}
#endif
@@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
|| __put_user(req32.request.signal, &request->request.signal))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
if (err)
return err;
@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* than always failing.
*/
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ return drm_ioctl(filp, cmd, arg);
fn = drm_compat_ioctls[nr];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
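All of the drm_ioc32.c changes above are the same mechanical rewrite: each compat handler forwards to the two-argument drm_ioctl(file, cmd, arg), and the lock_kernel()/unlock_kernel() pair in drm_compat_ioctl() disappears because drm_ioctl() now does its own locking. A sketch of the resulting wrapper shape, with hypothetical struct and ioctl names:

static int compat_drm_example(struct file *file, unsigned int cmd,
                              unsigned long arg)
{
        struct drm_example __user *request;     /* hypothetical 64-bit layout */

        request = compat_alloc_user_space(sizeof(*request));
        if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
                return -EFAULT;

        /* ... copy fields over from the 32-bit layout at 'arg' ... */

        return drm_ioctl(file, DRM_IOCTL_EXAMPLE, (unsigned long)request);
}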
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7998ee66b31..b98384dbd9a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs = 0;
}
+EXPORT_SYMBOL(drm_vblank_cleanup);
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
}
dev->vblank_disable_allowed = 0;
-
return 0;
err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
+ /* vblank is not initialized (IRQ not installed ?) */
+ if (!dev->num_crtcs)
+ return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index d7d7eac3ddd..2ac074c8f5d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -358,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
- if (size < best_size) {
+ if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
@@ -405,10 +405,11 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
wasted += alignment - tmp;
}
- if (entry->size >= size + wasted) {
+ if (entry->size >= size + wasted &&
+ (entry->start + wasted + size) <= end) {
if (!best_match)
return entry;
- if (size < best_size) {
+ if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
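Both drm_mm hunks above fix the same best-fit bug: the old scan compared the requested size against best_size, so best was never updated after the first match; it is the candidate's own size (entry->size) that must shrink monotonically. A standalone sketch of the corrected selection:

#include <stddef.h>

struct hole { unsigned long start, size; };

static const struct hole *best_fit(const struct hole *holes, int n,
                                   unsigned long size)
{
        const struct hole *best = NULL;
        unsigned long best_size = ~0UL;
        int i;

        for (i = 0; i < n; i++) {
                /* keep the smallest hole that still satisfies the request */
                if (holes[i].size >= size && holes[i].size < best_size) {
                        best = &holes[i];
                        best_size = holes[i].size;
                }
        }
        return best;
}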
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d81a02463a..76d63394c77 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1,9 +1,4 @@
/*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
@@ -36,6 +31,7 @@
*/
#include <linux/list.h>
+#include <linux/list_sort.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
+ * @priv: unused
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
@@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
@@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
return diff;
}
-/* FIXME: what we don't have a list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
- int (*cmp)(struct list_head *a, struct list_head *b))
-{
- struct list_head *p, *q, *e, *list, *tail, *oldhead;
- int insize, nmerges, psize, qsize, i;
-
- list = head->next;
- list_del(head);
- insize = 1;
- for (;;) {
- p = oldhead = list;
- list = tail = NULL;
- nmerges = 0;
-
- while (p) {
- nmerges++;
- q = p;
- psize = 0;
- for (i = 0; i < insize; i++) {
- psize++;
- q = q->next == oldhead ? NULL : q->next;
- if (!q)
- break;
- }
-
- qsize = insize;
- while (psize > 0 || (qsize > 0 && q)) {
- if (!psize) {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- } else if (!qsize || !q) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else if (cmp(p, q) <= 0) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- }
- if (tail)
- tail->next = e;
- else
- list = e;
- e->prev = tail;
- tail = e;
- }
- p = q;
- }
-
- tail->next = list;
- list->prev = tail;
-
- if (nmerges <= 1)
- break;
-
- insize *= 2;
- }
-
- head->next = list;
- head->prev = list->prev;
- list->prev->next = head;
- list->prev = head;
-}
-
/**
* drm_mode_sort - sort mode list
* @mode_list: list to sort
@@ -975,7 +893,7 @@ void list_sort(struct list_head *head,
*/
void drm_mode_sort(struct list_head *mode_list)
{
- list_sort(mode_list, drm_mode_compare);
+ list_sort(NULL, mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
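The drm_modes.c change above drops the driver-private list sorter in favor of the kernel's generic list_sort() from <linux/list_sort.h>, whose comparator carries an extra leading priv cookie (unused here, hence the NULL). A minimal sketch of that calling convention with a hypothetical element type:

#include <linux/list.h>
#include <linux/list_sort.h>

struct example_item {
        int key;
        struct list_head head;
};

static int example_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        /* negative if a sorts before b, positive otherwise */
        return container_of(a, struct example_item, head)->key -
               container_of(b, struct example_item, head)->key;
}

static void example_sort(struct list_head *list)
{
        list_sort(NULL, list, example_cmp);
}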
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094fb199..e68ebf92fa2 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
- dma_addr_t maxaddr)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
if (align > size)
return NULL;
- if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
- DRM_ERROR("Setting pci dma mask failed\n");
- return NULL;
- }
-
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 9422a74c8b5..81681a07a80 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -408,6 +408,11 @@ static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *i
ch7006_info(client, "Detected version ID: %x\n", val);
+ /* I don't know what this is for, but otherwise I get no
+ * signal.
+ */
+ ch7006_write(client, 0x3d, 0x0);
+
return 0;
fail:
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index 87f5445092e..e447dfb6389 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -427,11 +427,6 @@ void ch7006_state_load(struct i2c_client *client,
ch7006_load_reg(client, state, CH7006_SUBC_INC7);
ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
-
- /* I don't know what this is for, but otherwise I get no
- * signal.
- */
- ch7006_write(client, 0x3d, 0x0);
}
void ch7006_state_save(struct i2c_client *client,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 7d1d88cdf2d..de32d22a8c3 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a81796..c1e02752e02 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 877bf6cb14a..06bd732e646 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
};
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e4a62..44f990bed8f 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b58..a894ade0309 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(pages[page], KM_USER0);
+ kunmap_atomic(mem, KM_USER0);
}
}
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
obj = obj_priv->obj;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
DRM_ERROR("Failed to get pages: %d\n", ret);
spin_unlock(&dev_priv->mm.active_list_lock);
@@ -386,34 +386,6 @@ out:
return 0;
}
-static int i915_registers_info(struct seq_file *m, void *data) {
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
- for (reg=start; reg < end; reg += 4) \
- seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
- DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
- DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
- DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
- DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
- DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
- DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
- DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
- DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
- DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
- DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
- DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
- DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
- DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
- DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
-
- return 0;
-}
-
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f5..2307f98349f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ if (IS_I965G(dev))
+ dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+ 0xf0;
+
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto fail_batch_free;
+ }
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
@@ -813,9 +819,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = dev_priv->has_gem;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- param->param);
+ param->param);
return -EINVAL;
}
@@ -1117,7 +1127,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
- unsigned long cfb_base, ll_base;
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1211,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;
- if (IS_MOBILE(dev) || IS_I9XX(dev))
- dev_priv->cursor_needs_physical = true;
- else
- dev_priv->cursor_needs_physical = false;
-
- if (IS_I965G(dev) || IS_G33(dev))
- dev_priv->cursor_needs_physical = false;
-
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1260,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;
+ intel_modeset_init(dev);
+
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
@@ -1271,8 +1276,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- intel_modeset_init(dev);
-
drm_helper_initial_config(dev);
return 0;
@@ -1360,7 +1363,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
- int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
@@ -1376,8 +1379,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
+ dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
+ mmio_bar = IS_I9XX(dev) ? 0 : 1;
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
@@ -1652,6 +1657,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
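One subtle hunk above is the status-page write in i915_init_phys_hws(): on 965-class parts the HWS_PGA register apparently carries physical address bits 35:32 in register bits 7:4, so the code shifts them down by 28 and ORs them into the low byte before writing. A hedged sketch of that bit fold, assuming that register layout:

static u32 example_hws_pga(u64 dma_addr, int is_i965)
{
        u32 val = (u32)dma_addr;        /* page-aligned low 32 bits */

        if (is_i965)
                val |= ((u32)(dma_addr >> 28)) & 0xf0;  /* bits 35:32 -> 7:4 */
        return val;
}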
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2fa21786205..cf4cb3e9a0c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#include "drm_pciids.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -46,36 +45,149 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0400);
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
static struct drm_driver driver;
-static struct pci_device_id pciidlist[] = {
- i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) { \
+ .class = PCI_CLASS_DISPLAY_VGA << 8, \
+ .class_mask = 0xffff00, \
+ .vendor = 0x8086, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+ .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+ .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+ .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+ .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+ .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+ .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+ .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+ .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+ INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ {0, 0, 0}
};
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev || !dev_priv) {
- DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (i915_gem_idle(dev))
+ int error = i915_gem_idle(dev);
+ if (error) {
dev_err(&dev->pdev->dev,
- "GEM idle failed, resume may fail\n");
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
drm_irq_uninstall(dev);
}
@@ -83,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
intel_opregion_free(dev, 1);
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+
+ return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+ int error;
+
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("dev: %p\n", dev);
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
+
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
return 0;
}
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
+ int error = 0;
i915_restore_state(dev);
@@ -113,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- ret = -1;
+ error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
- }
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
}
dev_priv->modeset_on_lid = 0;
- return ret;
+ return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+
+ return i915_drm_thaw(dev);
}
/**
@@ -268,22 +403,73 @@ i915_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int error;
- return i915_suspend(dev, state);
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ error = i915_drm_freeze(drm_dev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
-static int
-i915_pci_resume(struct pci_dev *pdev)
+static int i915_pm_resume(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_resume(dev);
+ return i915_resume(drm_dev);
}
+static int i915_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_freeze(drm_dev);
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_resume,
+};
+
static struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
@@ -303,8 +489,11 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
+
+ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
.resume = i915_resume,
+
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
@@ -329,7 +518,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
@@ -344,10 +533,7 @@ static struct drm_driver driver = {
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
-#ifdef CONFIG_PM
- .resume = i915_pci_resume,
- .suspend = i915_pci_suspend,
-#endif
+ .driver.pm = &i915_pm_ops,
},
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5b..b99b6a841d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
struct intel_overlay;
+struct intel_device_info {
+ u8 is_mobile : 1;
+ u8 is_i8xx : 1;
+ u8 is_i915g : 1;
+ u8 is_i9xx : 1;
+ u8 is_i945gm : 1;
+ u8 is_i965g : 1;
+ u8 is_i965gm : 1;
+ u8 is_g33 : 1;
+ u8 need_gfx_hws : 1;
+ u8 is_g4x : 1;
+ u8 is_pineview : 1;
+ u8 is_ironlake : 1;
+ u8 has_fbc : 1;
+ u8 has_rc6 : 1;
+ u8 has_pipe_cxsr : 1;
+ u8 has_hotplug : 1;
+ u8 cursor_needs_physical : 1;
+};
+
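
The intel_device_info structure above is a capability table: one set of
single-bit feature flags per chip family, bound to the device once at probe
time. A small self-contained sketch of the idea, with hypothetical IDs and
flags rather than the real device list:

/* Feature tests become a field load from a per-chip info struct,
 * selected once by PCI ID, instead of long ID-comparison chains. */
#include <stdio.h>

struct chip_info {
	unsigned char is_mobile : 1;
	unsigned char has_hotplug : 1;
};

struct chip_match {
	unsigned short pci_id;
	const struct chip_info *info;
};

static const struct chip_info example_gm = { .is_mobile = 1, .has_hotplug = 1 };

static const struct chip_match matches[] = {
	{ 0x1234, &example_gm },	/* hypothetical device ID */
};

static const struct chip_info *lookup(unsigned short id)
{
	for (unsigned int i = 0; i < sizeof(matches) / sizeof(matches[0]); i++)
		if (matches[i].pci_id == id)
			return matches[i].info;
	return NULL;
}

int main(void)
{
	const struct chip_info *info = lookup(0x1234);

	if (info)
		printf("mobile=%d hotplug=%d\n", info->is_mobile, info->has_hotplug);
	return 0;
}
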
typedef struct drm_i915_private {
struct drm_device *dev;
+ const struct intel_device_info *info;
+
int has_gem;
void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
int hangcheck_count;
uint32_t last_acthd;
- bool cursor_needs_physical;
-
struct drm_mm vram;
unsigned long cfb_size;
@@ -263,6 +283,7 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int edp_support:1;
int lvds_ssc_freq;
+ int edp_bpp;
struct notifier_block lid_notifier;
@@ -287,8 +308,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveRENDERSTANDBY;
- u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -474,6 +493,15 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list, last_rendering_seqno can
+ * be used to differentiate between the two elements.
+ */
+ struct list_head gpu_write_list;
+
+ /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -561,6 +589,7 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
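
The new gpu_write_list works like any embedded kernel list: each object
carries a list node, and objects with an outstanding GPU write sit on one
per-device list so a flush can walk exactly the dirty set. A minimal
illustrative sketch (hand-rolled list, not the kernel's list API):

/* Objects dirtied by the GPU are linked onto a dedicated list;
 * retiring the write unlinks them again. */
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}

struct obj { struct node gpu_write_link; int write_domain; };

int main(void)
{
	struct node gpu_write_list;
	struct obj a = { .write_domain = 1 };

	list_init(&gpu_write_list);
	list_init(&a.gpu_write_link);

	if (a.write_domain)		/* object has a pending GPU write */
		list_add_tail(&a.gpu_write_link, &gpu_write_list);

	a.write_domain = 0;		/* flush retired the write */
	list_del_init(&a.gpu_write_link);

	printf("list empty: %d\n", gpu_write_list.next == &gpu_write_list);
	return 0;
}
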
/** driver private structure attached to each drm_gem_object */
@@ -572,6 +601,8 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
+ /** This object's place on GPU write list */
+ struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
@@ -703,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
@@ -794,6 +826,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -843,12 +877,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
@@ -860,6 +895,9 @@ void i915_gem_shrinker_exit(void);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+ int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1020,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
- (dev)->pci_device == 0x27AE)
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- (dev)->pci_device == 0x0042 || \
- (dev)->pci_device == 0x0046)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12)
-
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- IS_GM45(dev))
-
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
-#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
- (dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2 || \
- (IS_PINEVIEW(dev)))
-
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IRONLAKE(dev))
+#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1054,17 +1058,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
- (IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_PINEVIEW(dev) && \
- !IS_IRONLAKE(dev))
-#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050..ec8a0d7ffa3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
goto fail_unlock;
@@ -321,40 +321,24 @@ fail_unlock:
return ret;
}
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
- return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
int ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
/* If we've insufficient memory to map in the pages, attempt
* to make some space by throwing out some old buffers.
*/
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- gfp_t gfp;
ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
- gfp = i915_gem_object_get_page_gfp_mask(obj);
- i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
- ret = i915_gem_object_get_pages(obj);
- i915_gem_object_set_page_gfp_mask (obj, gfp);
+ ret = i915_gem_object_get_pages(obj, 0);
}
return ret;
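
The hunk above replaces the old mapping-wide gfp fiddling with an explicit
two-pass allocation: try cheaply first with __GFP_NORETRY | __GFP_NOWARN,
and only after evicting something retry with the full-effort mask. A small
sketch of the control flow, with the gfp flags modeled as a plain int:

/* First attempt fails fast and quietly; after making room, the
 * second attempt lets the allocator try hard. */
#include <stdio.h>

#define ENOMEM 12
#define NORETRY_NOWARN 1	/* stand-in for __GFP_NORETRY | __GFP_NOWARN */

static int try_get_pages(int flags) { return flags ? -ENOMEM : 0; }
static int evict_something(void)    { puts("evicted an old buffer"); return 0; }

static int get_pages_or_evict(void)
{
	int ret = try_get_pages(NORETRY_NOWARN);	/* cheap first pass */

	if (ret == -ENOMEM) {
		ret = evict_something();
		if (ret)
			return ret;
		ret = try_get_pages(0);			/* full effort now */
	}
	return ret;
}

int main(void) { return get_pages_or_evict(); }
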
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
goto fail_unlock;
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -2021,9 +2009,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -2039,6 +2024,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
+ /* release the fence reg _after_ flushing */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2099,8 +2088,8 @@ static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
int ret;
+ uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2122,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
@@ -2229,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
}
int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count, i;
@@ -2255,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
inode = obj->filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
- page = read_mapping_page(mapping, i, NULL);
+ page = read_cache_page_gfp(mapping, i,
+ mapping_gfp_mask (mapping) |
+ __GFP_COLD |
+ gfpmask);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
i915_gem_object_put_pages(obj);
@@ -2578,12 +2573,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- bool retry_alloc = false;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
- if (dev_priv->mm.suspended)
- return -EBUSY;
-
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
@@ -2625,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
- }
- ret = i915_gem_object_get_pages(obj);
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
- }
+ ret = i915_gem_object_get_pages(obj, gfpmask);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
@@ -2643,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev, obj->size);
if (ret) {
/* now try to shrink everyone else */
- if (! retry_alloc) {
- retry_alloc = true;
- goto search_free;
+ if (gfpmask) {
+ gfpmask = 0;
+ goto search_free;
}
return ret;
@@ -2723,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
- obj->write_domain = 0;
+ BUG_ON(obj->write_domain);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -2839,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
return 0;
}
+/*
+ * Prepare buffer for display plane. Use an uninterruptible wait for any
+ * flush, as the modesetting process is not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+
+ /* Wait on any GPU rendering and flushing to occur. */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ if (ret != 0)
+ return ret;
+ }
+
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
+ return 0;
+}
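
The new helper above is mostly domain bookkeeping: wait for the GPU, drop
stale CPU read caching, and mark the buffer as GTT-read/GTT-write and dirty.
A compact sketch of that bit manipulation, with invented domain values:

/* Domains are bits; moving a buffer to the display plane leaves it
 * readable and writable only through the GTT. */
#include <stdio.h>

#define DOM_CPU (1u << 0)
#define DOM_GTT (1u << 1)

struct buf { unsigned int read_domains, write_domain; int dirty; };

static void set_to_display_plane(struct buf *b)
{
	/* wait_for_gpu(b); -- elided: pending rendering completes first */
	b->read_domains &= DOM_GTT;	/* drop stale CPU read caching */
	b->read_domains |= DOM_GTT;
	b->write_domain = DOM_GTT;	/* scanout access goes via the GTT */
	b->dirty = 1;
}

int main(void)
{
	struct buf b = { .read_domains = DOM_CPU, .write_domain = DOM_CPU };

	set_to_display_plane(&b);
	printf("read=%#x write=%#x dirty=%d\n",
	       b.read_domains, b.write_domain, b.dirty);
	return 0;
}
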
+
/**
* Moves a single object to the CPU read, and possibly write domain.
*
@@ -3198,7 +3233,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
@@ -3206,12 +3241,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
+ bool need_fence;
+
+ need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence && !i915_obj_fenceable(dev, obj))
+ i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
if (ret)
return ret;
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ if (ret != -EBUSY && ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to install fence: %d\n",
+ ret);
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
entry->offset = obj_priv->gtt_offset;
/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3431,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
+ struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
@@ -3463,7 +3521,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
}
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry **relocs)
{
@@ -3478,8 +3536,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL)
+ if (*relocs == NULL) {
+ DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
return -ENOMEM;
+ }
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,13 +3563,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry *relocs)
{
uint32_t reloc_count = 0, i;
int ret = 0;
+ if (relocs == NULL)
+ return 0;
+
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
int unwritten;
@@ -3536,7 +3599,7 @@ err:
}
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3589,18 +3652,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
}
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec_list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs;
- int ret, ret2, i, pinned = 0;
+ struct drm_i915_gem_relocation_entry *relocs = NULL;
+ int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
@@ -3614,31 +3677,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
+ if (object_list == NULL) {
+ DRM_ERROR("Failed to allocate object list for %d buffers\n",
args->buffer_count);
ret = -ENOMEM;
goto pre_mutex_err;
}
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- goto pre_mutex_err;
- }
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
ret = copy_from_user(cliprects,
(struct drm_clip_rect __user *)
@@ -3680,6 +3733,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3688,6 +3743,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3801,16 +3858,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ if (obj->write_domain)
+ list_move_tail(&obj_priv->gpu_write_list,
+ &dev_priv->mm.gpu_write_list);
+ else
+ list_del_init(&obj_priv->gpu_write_list);
+
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
@@ -3884,8 +3948,101 @@ err:
mutex_unlock(&dev->struct_mutex);
+pre_mutex_err:
+ /* Copy the updated relocations out regardless of current error
+ * state. Failure to update the relocs would mean that the next
+ * time userland calls execbuf, it would do so with presumed offset
+ * state that didn't match the actual object state.
+ */
+ ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+ relocs);
+ if (ret2 != 0) {
+ DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+ if (ret == 0)
+ ret = ret2;
+ }
+
+ drm_free_large(object_list);
+ kfree(cliprects);
+
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (!IS_I965G(dev))
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = 0;
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
@@ -3898,25 +4055,62 @@ err:
}
}
- /* Copy the updated relocations out regardless of current error
- * state. Failure to update the relocs would mean that the next
- * time userland calls execbuf, it would do so with presumed offset
- * state that didn't match the actual object state.
- */
- ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
- relocs);
- if (ret2 != 0) {
- DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
- if (ret == 0)
- ret = ret2;
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
}
-pre_mutex_err:
- drm_free_large(object_list);
- drm_free_large(exec_list);
- kfree(cliprects);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+ drm_free_large(exec2_list);
return ret;
}
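
One detail worth noting in both execbuffer paths above: copy_from_user()
and copy_to_user() return the number of bytes left uncopied, not an errno,
which is why a nonzero result is translated to -EFAULT rather than returned
directly. A stubbed sketch of that convention:

/* A stand-in for copy_from_user: returns bytes NOT copied, so the
 * caller converts any nonzero result into a proper errno. */
#include <stdio.h>
#include <string.h>

#define EFAULT 14

static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n)
{
	if (!src)
		return n;	/* bad user pointer: nothing copied */
	memcpy(dst, src, n);
	return 0;
}

static int fetch_entries(void *dst, const void *usr, unsigned long n)
{
	if (fake_copy_from_user(dst, usr, n) != 0)
		return -EFAULT;	/* report an errno, not a byte count */
	return 0;
}

int main(void)
{
	char buf[4];

	printf("ok=%d bad=%d\n",
	       fetch_entries(buf, "abc", 4), fetch_entries(buf, NULL, 4));
	return 0;
}
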
@@ -3933,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (ret)
return ret;
}
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle tiled surfaces.
- */
- if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
- return ret;
- }
- }
+
obj_priv->pin_count++;
/* If the object is not active and not pending a flush,
@@ -4203,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+ INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
@@ -4654,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4708,7 +4892,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
@@ -4766,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
if (!obj_priv->phys_obj)
return;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret)
goto out;
@@ -4824,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
DRM_ERROR("failed to get page list\n");
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09b..df278b2685b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
/**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable. If not, it may need
+ * to be unbound and have its pitch adjusted.
*/
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
+bool
+i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
{
- int i;
- int start;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (IS_I965G(dev)) {
/* The 965 can have fences at any page boundary. */
- return ALIGN(size, 4096);
+ if (obj->size & 4095)
+ return false;
+ return true;
+ } else if (IS_I9XX(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
} else {
- /* Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+ }
- for (i = start; i < size; i <<= 1)
- ;
+ /* Power of two sized... */
+ if (obj->size & (obj->size - 1))
+ return false;
- return i;
- }
+ /* Objects must be size aligned as well */
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+ return true;
}
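
The non-965 checks above boil down to two classic bit tests: the object
size must be a power of two, and the GTT offset must be aligned to that
size. Both reduce to a single AND. A tiny standalone illustration:

/* Power-of-two and alignment checks as used for fence constraints. */
#include <stdio.h>

static int is_pow2(unsigned long v)
{
	return v && !(v & (v - 1));
}

static int aligned_to(unsigned long offset, unsigned long size)
{
	return (offset & (size - 1)) == 0;
}

int main(void)
{
	/* 1MiB object at a 3MiB offset: fenceable */
	printf("%d %d\n", is_pow2(1ul << 20), aligned_to(3ul << 20, 1ul << 20));
	/* odd-sized or misaligned: not fenceable */
	printf("%d %d\n", is_pow2((1ul << 20) + 4096),
	       aligned_to(4096, 1ul << 20));
	return 0;
}
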
/* Check pitch constraints for all chips & tiling formats */
-static bool
+bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride & (stride - 1))
return false;
- /* We don't handle the aperture area covered by the fence being bigger
- * than the object size.
- */
- if (i915_get_fence_size(dev, size) != size)
- return false;
-
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b7..13b028994b2 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
&batchbuffer->cliprects))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_BATCHBUFFER,
+ return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}
@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
&cmdbuffer->cliprects))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
+ return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
+ (unsigned long)cmdbuffer);
}
typedef struct drm_i915_irq_emit32 {
@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
&request->irq_seq))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
+ (unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
+ (unsigned long)request);
}
typedef struct drm_i915_mem_alloc32 {
@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
&request->region_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_ALLOC, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
+ (unsigned long)request);
}
drm_ioctl_compat_t *i915_compat_ioctls[] = {
@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
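
With the BKL gone, the compat path above is a plain table dispatch: the
ioctl number indexes an array of translation handlers, and anything without
a translator falls straight through to the native handler. A simplified
sketch (signatures invented for illustration):

/* 32-bit entry point: translate if a handler exists, else call the
 * native ioctl directly -- no lock_kernel() wrapper remains. */
#include <stdio.h>

typedef long (*compat_fn)(unsigned int cmd, unsigned long arg);

static long native_ioctl(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	printf("native handler for cmd %u\n", cmd);
	return 0;
}

static long compat_batchbuffer(unsigned int cmd, unsigned long arg)
{
	(void)cmd; (void)arg;
	puts("translate 32-bit layout, then call native handler");
	return native_ioctl(0, 0);
}

static compat_fn compat_table[] = { compat_batchbuffer };

static long compat_ioctl(unsigned int nr, unsigned long arg)
{
	compat_fn fn = NULL;

	if (nr < sizeof(compat_table) / sizeof(compat_table[0]))
		fn = compat_table[nr];
	return fn ? fn(nr, arg) : native_ioctl(nr, arg);
}

int main(void) { return compat_ioctl(0, 0) || compat_ioctl(7, 0); }
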
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5de97e..a17d6bdfe63 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
- u32 new_de_iir, new_gt_iir, new_pch_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
@@ -286,49 +285,58 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
- for (;;) {
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
- break;
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ goto done;
- ret = IRQ_HANDLED;
+ ret = IRQ_HANDLED;
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- new_pch_iir = I915_READ(SDEIIR);
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
- I915_WRITE(DEIIR, de_iir);
- new_de_iir = I915_READ(DEIIR);
- I915_WRITE(GTIIR, gt_iir);
- new_gt_iir = I915_READ(GTIIR);
+ if (gt_iir & GT_USER_INTERRUPT) {
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ }
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
+ if (de_iir & DE_GSE)
+ ironlake_opregion_gse_intr(dev);
- if (gt_iir & GT_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
- }
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ }
- if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ }
- /* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- }
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
- de_iir = new_de_iir;
- gt_iir = new_gt_iir;
- pch_iir = new_pch_iir;
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ /* check event from PCH */
+ if ((de_iir & DE_PCH_EVENT) &&
+ (pch_iir & SDE_HOTPLUG_MASK)) {
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
+ /* the PCH hotplug event should be cleared before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+
+done:
I915_WRITE(DEIER, de_ier);
(void)I915_READ(DEIER);
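
The handler rework above moves from a re-read loop to a single pass: snapshot
the identity registers once, service each pending bit, then write the same
values back to acknowledge (IIR bits are write-one-to-clear). A toy model of
that flow with fake registers:

/* Single-pass IRQ servicing: read once, handle, ack at the end. */
#include <stdio.h>

#define BIT_USER   0x1u
#define BIT_VBLANK 0x4u

static unsigned int de_iir = BIT_VBLANK, gt_iir = BIT_USER; /* pretend-pending */

int main(void)
{
	unsigned int de = de_iir, gt = gt_iir;	/* snapshot, like I915_READ */

	if (de == 0 && gt == 0)
		return 0;			/* nothing pending */

	if (gt & BIT_USER)
		puts("wake seqno waiters, reset hangcheck");
	if (de & BIT_VBLANK)
		puts("handle vblank");

	de_iir &= ~de;	/* models write-one-to-clear acknowledge */
	gt_iir &= ~gt;
	return 0;
}
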
@@ -852,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IRONLAKE(dev))
- return 0;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_I965G(dev))
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -874,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IRONLAKE(dev))
- return;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@@ -1023,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable the kinds of interrupts that are always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask;
+ dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
@@ -1084,6 +1094,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
(void) I915_READ(IER);
}
+/*
+ * Must be called after intel_modeset_init or hotplug interrupts won't be
+ * enabled correctly.
+ */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1120,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- /* Leave other bits alone */
- hotplug_en |= HOTPLUG_EN_MASK;
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ /* Ignore TV since it's buggy */
+
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
- TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
- SDVOB_HOTPLUG_INT_STATUS;
- if (IS_G4X(dev)) {
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS |
- HDMIC_HOTPLUG_INT_STATUS |
- HDMID_HOTPLUG_INT_STATUS;
- }
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
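
Instead of OR-ing in a fixed HOTPLUG_EN_MASK, the code above derives the
enable bits from whatever status bits the probed connectors registered in
hotplug_supported_mask. A reduced sketch of that translation, with made-up
bit positions:

/* Only outputs that registered support get their hotplug enable bit. */
#include <stdio.h>

#define CRT_STATUS   (1u << 11)
#define HDMIB_STATUS (1u << 29)
#define CRT_EN       (1u << 9)
#define HDMIB_EN     (1u << 4)

int main(void)
{
	unsigned int supported = CRT_STATUS | HDMIB_STATUS; /* set at connector init */
	unsigned int hotplug_en = 0;

	if (supported & CRT_STATUS)
		hotplug_en |= CRT_EN;
	if (supported & HDMIB_STATUS)
		hotplug_en |= HDMIB_EN;
	/* TV is deliberately left out: its detection is unreliable */

	printf("hotplug_en = %#x\n", hotplug_en);
	return 0;
}
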
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf7061..ab1bd2d3d3b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
@@ -879,13 +880,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
-#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
- HDMIC_HOTPLUG_INT_EN | \
- HDMID_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +976,8 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER (1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1751,6 +1747,8 @@
/* Display & cursor control */
+/* dithering flag on Ironlake */
+#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -1818,7 +1816,7 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
-#define DSPFW_CURSORA_SHIFT 16
+#define DSPFW_CURSORA_SHIFT 8
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00a9d4..a3b90c9561d 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
- dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
- }
-
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
- I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
- }
-
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f2756774758..15fbc1b5a83 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
+static int panel_type;
+
static void *
find_section(struct bdb_header *bdb, int section_id)
{
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+ panel_type = lvds_options->panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
memset(temp_mode, 0, sizeof(*temp_mode));
}
kfree(temp_mode);
- if (temp_downclock < panel_fixed_mode->clock) {
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
+ assume 18bpp panel color depth.\n");
+ dev_priv->edp_bpp = 18;
+ }
+ return;
+ }
+
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp_bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp_bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp_bpp = 30;
+ break;
+ }
+}
+
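
The color_depth decode in parse_edp() packs one 2-bit code per panel, so
panel_type (taken earlier from the LVDS options block) selects the field by
shifting panel_type * 2. A standalone sketch of the extraction:

/* Per-panel 2-bit color-depth codes packed into one word. */
#include <stdio.h>

enum { BPP18 = 0, BPP24 = 1, BPP30 = 2 };	/* matches EDP_*BPP */

static int edp_bpp(unsigned int color_depth, int panel_type)
{
	switch ((color_depth >> (panel_type * 2)) & 3) {
	case BPP18: return 18;
	case BPP24: return 24;
	case BPP30: return 30;
	default:    return 0;	/* reserved encoding */
	}
}

int main(void)
{
	/* low nibble 0b1001: panel 0 -> 24bpp, panel 1 -> 30bpp */
	unsigned int depth_field = 0x9;

	printf("%d %d\n", edp_bpp(depth_field, 0), edp_bpp(depth_field, 1));
	return 0;
}
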
+static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev)
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d7f72..4c18514f6f8 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
u8 custom_vbt_version;
} __attribute__((packed));
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t3;
+ u16 t7;
+ u16 t9;
+ u16 t10;
+ u16 t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ u32 sdrrs_msa_timing_delay;
+ struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
bool intel_init_bios(struct drm_device *dev);
/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e56341..79dd4026586 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -548,4 +551,6 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
+
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da..b27202d23eb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
- bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
};
#define I8XX_DOT_MIN 25000
@@ -242,38 +240,93 @@ struct intel_limit {
#define IRONLAKE_DOT_MAX 350000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
-#define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 5
-#define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 118
#define IRONLAKE_M1_MIN 12
-#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
-#define IRONLAKE_P_SDVO_DAC_MIN 5
-#define IRONLAKE_P_SDVO_DAC_MAX 80
-#define IRONLAKE_P_LVDS_MIN 28
-#define IRONLAKE_P_LVDS_MAX 112
-#define IRONLAKE_P1_MIN 1
-#define IRONLAKE_P1_MAX 8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225 MHz */
+/* We have parameter ranges for different types of outputs. */
+
+/* DAC & HDMI refclk 120 MHz */
+#define IRONLAKE_DAC_N_MIN 1
+#define IRONLAKE_DAC_N_MAX 5
+#define IRONLAKE_DAC_M_MIN 79
+#define IRONLAKE_DAC_M_MAX 127
+#define IRONLAKE_DAC_P_MIN 5
+#define IRONLAKE_DAC_P_MAX 80
+#define IRONLAKE_DAC_P1_MIN 1
+#define IRONLAKE_DAC_P1_MAX 8
+#define IRONLAKE_DAC_P2_SLOW 10
+#define IRONLAKE_DAC_P2_FAST 5
+
+/* LVDS single-channel 120 MHz refclk */
+#define IRONLAKE_LVDS_S_N_MIN 1
+#define IRONLAKE_LVDS_S_N_MAX 3
+#define IRONLAKE_LVDS_S_M_MIN 79
+#define IRONLAKE_LVDS_S_M_MAX 118
+#define IRONLAKE_LVDS_S_P_MIN 28
+#define IRONLAKE_LVDS_S_P_MAX 112
+#define IRONLAKE_LVDS_S_P1_MIN 2
+#define IRONLAKE_LVDS_S_P1_MAX 8
+#define IRONLAKE_LVDS_S_P2_SLOW 14
+#define IRONLAKE_LVDS_S_P2_FAST 14
+
+/* LVDS dual-channel 120 MHz refclk */
+#define IRONLAKE_LVDS_D_N_MIN 1
+#define IRONLAKE_LVDS_D_N_MAX 3
+#define IRONLAKE_LVDS_D_M_MIN 79
+#define IRONLAKE_LVDS_D_M_MAX 127
+#define IRONLAKE_LVDS_D_P_MIN 14
+#define IRONLAKE_LVDS_D_P_MAX 56
+#define IRONLAKE_LVDS_D_P1_MIN 2
+#define IRONLAKE_LVDS_D_P1_MAX 8
+#define IRONLAKE_LVDS_D_P2_SLOW 7
+#define IRONLAKE_LVDS_D_P2_FAST 7
+
+/* LVDS single-channel 100 MHz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN 1
+#define IRONLAKE_LVDS_S_SSC_N_MAX 2
+#define IRONLAKE_LVDS_S_SSC_M_MIN 79
+#define IRONLAKE_LVDS_S_SSC_M_MAX 126
+#define IRONLAKE_LVDS_S_SSC_P_MIN 28
+#define IRONLAKE_LVDS_S_SSC_P_MAX 112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
+
+/* LVDS dual-channel 100 MHz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN 1
+#define IRONLAKE_LVDS_D_SSC_N_MAX 3
+#define IRONLAKE_LVDS_D_SSC_M_MIN 79
+#define IRONLAKE_LVDS_D_SSC_M_MAX 126
+#define IRONLAKE_LVDS_D_SSC_P_MIN 14
+#define IRONLAKE_LVDS_D_SSC_P_MAX 42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN 1
+#define IRONLAKE_DP_N_MAX 2
+#define IRONLAKE_DP_M_MIN 81
+#define IRONLAKE_DP_M_MAX 90
+#define IRONLAKE_DP_P_MIN 10
+#define IRONLAKE_DP_P_MAX 20
+#define IRONLAKE_DP_P2_FAST 10
+#define IRONLAKE_DP_P2_SLOW 10
+#define IRONLAKE_DP_P2_LIMIT 0
+#define IRONLAKE_DP_P1_MIN 1
+#define IRONLAKE_DP_P1_MAX 2
+
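
With one parameter set per output type above, picking PLL limits becomes a
lookup on output type, LVDS channel count, and reference clock. A simplified
sketch of that selection; the table contents are placeholders, not the real
limits:

/* Output type + channel count + refclk select one limit table. */
#include <stdio.h>

struct pll_limits { int n_min, n_max; const char *name; };

static const struct pll_limits dac        = { 1, 5, "dac/hdmi" };
static const struct pll_limits lvds_s_120 = { 1, 3, "single lvds, 120 MHz" };
static const struct pll_limits lvds_d_120 = { 1, 3, "dual lvds, 120 MHz" };
static const struct pll_limits lvds_s_100 = { 1, 2, "single lvds, 100 MHz" };
static const struct pll_limits lvds_d_100 = { 1, 3, "dual lvds, 100 MHz" };

static const struct pll_limits *pick(int is_lvds, int dual, int refclk_khz)
{
	if (!is_lvds)
		return &dac;
	if (refclk_khz == 100000)
		return dual ? &lvds_d_100 : &lvds_s_100;
	return dual ? &lvds_d_120 : &lvds_s_120;
}

int main(void)
{
	printf("%s\n", pick(1, 0, 100000)->name);
	printf("%s\n", pick(1, 1, 120000)->name);
	return 0;
}
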
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
-static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
/* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_display_port = {
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+ .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+ .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_DAC_P2_SLOW,
+ .p2_fast = IRONLAKE_DAC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
- .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
};
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_LVDS_SLOW,
- .p2_fast = IRONLAKE_P2_LVDS_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+ .dot = { .min = IRONLAKE_DOT_MIN,
+ .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN,
+ .max = IRONLAKE_VCO_MAX},
+ .n = { .min = IRONLAKE_DP_N_MIN,
+ .max = IRONLAKE_DP_N_MAX },
+ .m = { .min = IRONLAKE_DP_M_MIN,
+ .max = IRONLAKE_DP_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN,
+ .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN,
+ .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_DP_P_MIN,
+ .max = IRONLAKE_DP_P_MAX },
+ .p1 = { .min = IRONLAKE_DP_P1_MIN,
+ .max = IRONLAKE_DP_P1_MAX},
+ .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+ .p2_slow = IRONLAKE_DP_P2_SLOW,
+ .p2_fast = IRONLAKE_DP_P2_FAST },
+ .find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_ironlake_lvds;
+ int refclk = 120;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+ refclk = 100;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP) {
+ /* LVDS dual channel */
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ HAS_eDP)
+ limit = &intel_limits_ironlake_display_port;
else
- limit = &intel_limits_ironlake_sdvo;
+ limit = &intel_limits_ironlake_dac;
return limit;
}
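
The limit tables above only bound the divisor search; the dot clock itself falls out of the classic i9xx divider equation, sketched here under that assumption (field handling varies per chipset, so this is illustrative, not authoritative):

    static void sketch_i9xx_clock(int refclk, intel_clock_t *clock)
    {
            /* effective multiplier, post divider, then VCO and dot clock */
            clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
            clock->p = clock->p1 * clock->p2;
            clock->vco = refclk * clock->m / (clock->n + 2);
            clock->dot = clock->vco / clock->p;
    }

With refclk at 120 versus the 100 MHz SSC case, the same target dot clock lands in different n/m/p ranges, which is why Ironlake carries the separate *_100m limit tables above.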
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return (err != target);
}
-
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-
-{
- struct drm_device *dev = crtc->dev;
- intel_clock_t clock;
- int err = target;
- bool found = false;
-
- memcpy(&clock, best_clock, sizeof(intel_clock_t));
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in Pineview */
- if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
- break;
- for (clock.n = limit->n.min; clock.n <= limit->n.max;
- clock.n++) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ int lvds_reg;
+
+ if (IS_IRONLAKE(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+ if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
+
+ /* return directly when it is eDP */
+ if (HAS_eDP)
+ return true;
+
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
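
Since a DP link runs at one of two fixed rates, no PLL search happens here; the 200000 kHz boundary alone selects the canned divisor set. Restated minimally (illustrative only):

    static int sketch_dp_link_khz(int target_khz)
    {
            /* e.g. a 148500 kHz 1080p mode lands in the 162 MHz bucket */
            return (target_khz < 200000) ? 162000 : 270000;
    }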
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t clock;
- int err_most = 47;
- int err_min = 10000;
-
- /* eDP has only 2 clock choice, no n/m/p setting */
- if (HAS_eDP)
- return true;
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_ironlake_dp(limit, crtc, target,
- refclk, best_clock);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- /* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
- this_err = abs((10000 - (target*10000/clock.dot)));
- if (this_err < err_most) {
- *best_clock = clock;
- /* found on first matching */
- goto out;
- } else if (this_err < err_min) {
- *best_clock = clock;
- err_min = this_err;
- }
- }
- }
- }
- }
-out:
- return true;
-}
-
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1282,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_display_plane(obj);
if (ret != 0) {
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
@@ -1493,6 +1537,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
u32 temp;
int tries = 5, j, n;
+ u32 pipe_bpc;
+
+ temp = I915_READ(pipeconf_reg);
+ pipe_bpc = temp & PIPE_BPC_MASK;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1572,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
+ /*
+ * Make the BPC in FDI rx consistent with that in the
+ * pipeconf reg.
+ */
+ temp &= ~(0x7 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
FDI_SEL_PCDCLK |
FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1720,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
+ /*
+ * Make the BPC in the transcoder consistent with
+ * that in the pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
@@ -1697,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ drm_vblank_off(dev, pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -1745,6 +1806,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
+ /* BPC in FDI rx is consistent with that in pipeconf */
+ temp &= ~(0x07 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
I915_READ(fdi_rx_reg);
@@ -1789,7 +1853,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
-
+ temp = I915_READ(transconf_reg);
+ /* BPC in transcoder is consistent with that in pipeconf */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
+ I915_WRITE(transconf_reg, temp);
+ I915_READ(transconf_reg);
udelay(100);
/* disable PCH DPLL */
@@ -2448,7 +2517,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
@@ -2559,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2570,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2598,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2613,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm = 1;
srwm &= 0x3f;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2667,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (HAS_FW_BLC(dev) && sr_hdisplay &&
(!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 6000;
+ static const int sr_latency_ns = 6000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2681,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2906,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- if (is_lvds && limit->find_reduced_pll &&
- dev_priv->lvds_downclock_avail) {
- memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
- has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
@@ -2969,6 +3048,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
+ temp &= ~PIPE_BPC_MASK;
+ if (is_lvds) {
+ int lvds_reg = I915_READ(PCH_LVDS);
+ /* the BPC will be 6 if it is an 18-bit LVDS panel */
+ if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ temp |= PIPE_8BPC;
+ else
+ temp |= PIPE_6BPC;
+ } else if (is_edp) {
+ switch (dev_priv->edp_bpp/3) {
+ case 8:
+ temp |= PIPE_8BPC;
+ break;
+ case 10:
+ temp |= PIPE_10BPC;
+ break;
+ case 6:
+ temp |= PIPE_6BPC;
+ break;
+ case 12:
+ temp |= PIPE_12BPC;
+ break;
+ }
+ } else
+ temp |= PIPE_8BPC;
+ I915_WRITE(pipeconf_reg, temp);
+ I915_READ(pipeconf_reg);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
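
Since edp_bpp counts bits across all three color channels, dividing by 3 yields bits per component; the mapping just programmed amounts to:

    /* edp_bpp = 18 -> 18/3 = 6  -> PIPE_6BPC
     * edp_bpp = 24 -> 24/3 = 8  -> PIPE_8BPC
     * edp_bpp = 30 -> 30/3 = 10 -> PIPE_10BPC
     * edp_bpp = 36 -> 36/3 = 12 -> PIPE_12BPC */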
@@ -3195,7 +3301,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
-
+ /* set the dithering flag */
+ if (IS_I965G(dev)) {
+ if (dev_priv->lvds_dither) {
+ if (IS_IRONLAKE(dev))
+ pipeconf |= PIPE_ENABLE_DITHER;
+ else
+ lvds |= LVDS_ENABLE_DITHER;
+ } else {
+ if (IS_IRONLAKE(dev))
+ pipeconf &= ~PIPE_ENABLE_DITHER;
+ else
+ lvds &= ~LVDS_ENABLE_DITHER;
+ }
+ }
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
@@ -3385,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
- if (!dev_priv->cursor_needs_physical) {
+ if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
I915_WRITE(base, addr);
if (intel_crtc->cursor_bo) {
- if (dev_priv->cursor_needs_physical) {
+ if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
@@ -3779,125 +3898,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- /* Restore render clock frequency to original value */
- if (IS_G4X(dev) || IS_I9XX(dev))
- pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
- else if (IS_I85X(dev))
- pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
- /* Schedule downclock */
- if (schedule)
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- if (IS_G4X(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
- gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I965G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
- gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
- gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I915G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
- gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I85X(dev)) {
- u16 hpllcc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
- /* Up to maximum... */
- hpllcc &= ~GC_CLOCK_CONTROL_MASK;
- hpllcc |= GC_CLOCK_133_200;
-
- pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
- }
- DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
- if (IS_IRONLAKE(dev))
- return;
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
- IS_I915GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~0xf0;
- gcfgc |= 0x80;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- }
-}
-
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +4011,6 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- /* GPU isn't processing, downclock it. */
- if (!dev_priv->busy) {
- intel_decrease_renderclock(dev);
- intel_decrease_displayclock(dev);
- }
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4050,13 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
+ if (!dev_priv->busy)
dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
- } else {
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -4089,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *obj;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
};
@@ -4100,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->obj);
- drm_gem_object_unreference(work->obj);
+ i915_gem_object_unpin(work->old_fb_obj);
+ drm_gem_object_unreference(work->pending_flip_obj);
+ drm_gem_object_unreference(work->old_fb_obj);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -4124,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
+ if (work && !work->pending) {
+ obj_priv = work->pending_flip_obj->driver_private;
+ DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+ obj_priv,
+ atomic_read(&obj_priv->pending_flip));
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4144,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->obj->driver_private;
- if (atomic_dec_and_test(&obj_priv->pending_flip))
+ obj_priv = work->pending_flip_obj->driver_private;
+
+ /* Initial scanout buffer will have a 0 pending flip count */
+ if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+ atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
}
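
The pending_flip counter is what stitches the queue and IRQ paths together; a rough outline of the lifecycle (names from this file, the ordering is the point):

    /* intel_crtc_page_flip():    atomic_inc(&obj_priv->pending_flip),
     *                            then emit MI_DISPLAY_FLIP on the ring
     * intel_prepare_page_flip(): flip-pending IRQ sets work->pending
     * intel_finish_page_flip():  vblank IRQ decrements the counter,
     *                            wakes pending_flip_queue, schedules
     *                            the unpin work
     *
     * The added atomic_read() == 0 test keeps a waiter on the initial
     * scanout buffer, which was never counted up, from blocking forever. */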
@@ -4158,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
+ if (intel_crtc->unpin_work) {
intel_crtc->unpin_work->pending = 1;
+ } else {
+ DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -4175,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
- int ret;
+ int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int ret, pipesrc;
RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4187,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
- work->obj = intel_fb->obj;
+ work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
mutex_unlock(&dev->struct_mutex);
@@ -4206,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ obj->driver_private);
kfree(work);
+ intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
- /* Reference the old fb object for the scheduled work. */
- drm_gem_object_reference(work->obj);
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(work->old_fb_obj);
+ drm_gem_object_reference(obj);
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
obj_priv = obj->driver_private;
atomic_inc(&obj_priv->pending_flip);
+ work->pending_flip_obj = obj;
BEGIN_LP_RING(4);
OUT_RING(MI_DISPLAY_FLIP |
@@ -4226,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
- OUT_RING((fb->width << 16) | fb->height);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(obj_priv->gtt_offset);
OUT_RING(MI_NOOP);
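
On 965 the flip command now carries the pipe source size instead of the fb size: PIPESRC packs (width - 1) and (height - 1) into two 12-bit fields, which is exactly what the 0x0fff0fff mask preserves. For example:

    /* a 1920x1080 source:
     *      ((1920 - 1) << 16) | (1080 - 1) = 0x077f0437
     * and 0x077f0437 & 0x0fff0fff leaves both size fields intact. */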
@@ -4400,29 +4414,43 @@ static void intel_setup_outputs(struct drm_device *dev)
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
+ }
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B);
+ }
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED)
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC);
+ }
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev))
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC);
- if (SUPPORTS_INTEGRATED_DP(dev))
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C);
+ }
}
- if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED)) {
+ DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
+ }
} else if (IS_I8XX(dev))
intel_dvo_init(dev);
@@ -4527,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_changed = intelfb_probe,
};
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+ struct drm_gem_object *pwrctx;
+ int ret;
+
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return pwrctx;
+
+err_unpin:
+ i915_gem_object_unpin(pwrctx);
+err_unref:
+ drm_gem_object_unreference(pwrctx);
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4643,27 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (I915_HAS_RC6(dev)) {
- struct drm_gem_object *pwrctx;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_i915_gem_object *obj_priv = NULL;
if (dev_priv->pwrctx) {
obj_priv = dev_priv->pwrctx->driver_private;
} else {
- pwrctx = drm_gem_object_alloc(dev, 4096);
- if (!pwrctx) {
- DRM_DEBUG("failed to alloc power context, "
- "RC6 disabled\n");
- goto out;
- }
+ struct drm_gem_object *pwrctx;
- ret = i915_gem_object_pin(pwrctx, 4096);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n",
- ret);
- drm_gem_object_unreference(pwrctx);
- goto out;
+ pwrctx = intel_alloc_power_context(dev);
+ if (pwrctx) {
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
}
-
- i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
- dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
}
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
- I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ if (obj_priv) {
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
}
-
-out:
- return;
}
/* Set up chip specific display functions */
@@ -4770,7 +4819,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
del_timer_sync(&intel_crtc->idle_timer);
}
- intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
if (dev_priv->display.disable_fbc)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b7b93..439506cefc1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
-intel_dp_link_required(int pixel_clock)
+intel_dp_link_required(struct drm_device *dev,
+ struct intel_output *intel_output, int pixel_clock)
{
- return pixel_clock * 3;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_eDP(intel_output))
+ return (pixel_clock * dev_priv->edp_bpp) / 8;
+ else
+ return pixel_clock * 3;
}
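
In these (admittedly fictional) units the requirement is the pixel clock in kHz times bytes per pixel, weighed against link_clock * lane_count. Worked numbers under that reading:

    /* a 148500 kHz mode on a regular DP port:
     *      required  = 148500 * 3       = 445500
     *      available = 270000 * 4 lanes = 1080000  -> mode accepted
     * the same mode on eDP with edp_bpp = 18:
     *      required  = 148500 * 18 / 8  = 334125 */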
static int
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
int max_lanes = intel_dp_max_lane_count(intel_output);
- if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+ > max_link_clock * max_lanes)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
- if (intel_dp_link_required(mode->clock) <= link_avail) {
+ if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+ <= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output)
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given DP is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it is assumed that the given
- * DP is present.
- */
-static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, dp_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- dp_port = 0;
- if (dp_reg == DP_B || dp_reg == PCH_DP_B)
- dp_port = PORT_IDPB;
- else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
- dp_port = PORT_IDPC;
- else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
- dp_port = PORT_IDPD;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not DP, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_DP &&
- p_child->device_type != DEVICE_TYPE_eDP)
- continue;
- /* Find the eDP port */
- if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
- ret = 1;
- break;
- }
- /* Find the DP port */
- if (p_child->dvo_port == dp_port) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
struct intel_dp_priv *dp_priv;
const char *name = NULL;
- if (!dp_is_present_in_vbt(dev, output_reg)) {
- DRM_DEBUG_KMS("DP is not present. Ignore it\n");
- return;
- }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_output)) {
- intel_output->crtc_mask = (1 << 1);
+ if (IS_eDP(intel_output))
intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- } else
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -1402,14 +1359,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
case DP_B:
case PCH_DP_B:
+ dev_priv->hotplug_supported_mask |=
+ HDMIB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
+ dev_priv->hotplug_supported_mask |=
+ HDMIC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
+ dev_priv->hotplug_supported_mask |=
+ HDMID_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362..aaabbcbe590 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+ ret = i915_gem_object_pin(fbo, 64*1024);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe7d40..0e268deed76 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_hdmi_enc_destroy,
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given HDMI is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the given
- * HDMI is present.
- */
-static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, hdmi_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- if (hdmi_reg == SDVOB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == SDVOC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMIB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == HDMIC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMID)
- hdmi_port = DVO_D;
- else
- return 0;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not HDMI, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_HDMI)
- continue;
- /* Find the HDMI port */
- if (p_child->dvo_port == hdmi_port) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_output *intel_output;
struct intel_hdmi_priv *hdmi_priv;
- if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
- DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
- return;
- }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -303,21 +253,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (sdvox_reg == SDVOB) {
intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
if (!intel_output->ddc_bus)
goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce274e6..c2e8a45780d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -602,12 +602,47 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
/* Some lid devices report incorrect lid status, assume they're connected */
static const struct dmi_system_id bad_lid_status[] = {
{
+ .ident = "Compaq nx9020",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "3084"),
+ },
+ },
+ {
+ .ident = "Samsung SX20S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
+ },
+ },
+ {
.ident = "Aspire One",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
+ {
+ .ident = "Aspire 1810T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+ },
+ },
+ {
+ .ident = "PC-81005",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ },
+ },
+ {
+ .ident = "Clevo M5x0N",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+ },
+ },
{ }
};
@@ -622,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
{
enum drm_connector_status status = connector_status_connected;
- if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
return status;
@@ -679,7 +714,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector = dev_priv->int_lvds_connector;
+ /*
+ * Check and update the status of the LVDS connector after
+ * receiving the lid notification event.
+ */
+ if (connector)
+ connector->status = connector->funcs->detect(connector);
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
@@ -854,65 +896,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-#ifdef CONFIG_ACPI
-/*
- * check_lid_device -- check whether @handle is an ACPI LID device.
- * @handle: ACPI device handle
- * @level : depth in the ACPI namespace tree
- * @context: the number of LID device when we find the device
- * @rv: a return value to fill if desired (Not use)
- */
-static acpi_status
-check_lid_device(acpi_handle handle, u32 level, void *context,
- void **return_value)
-{
- struct acpi_device *acpi_dev;
- int *lid_present = context;
-
- acpi_dev = NULL;
- /* Get the acpi device for device handle */
- if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
- /* If there is no ACPI device for handle, return */
- return AE_OK;
- }
-
- if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
- *lid_present = 1;
-
- return AE_OK;
-}
-
-/**
- * check whether there exists the ACPI LID device by enumerating the ACPI
- * device tree.
- */
-static int intel_lid_present(void)
-{
- int lid_present = 0;
-
- if (acpi_disabled) {
- /* If ACPI is disabled, there is no ACPI device tree to
- * check, so assume the LID device would have been present.
- */
- return 1;
- }
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- check_lid_device, NULL, &lid_present, NULL);
-
- return lid_present;
-}
-#else
-static int intel_lid_present(void)
-{
- /* In the absence of ACPI built in, assume that the LID device would
- * have been present.
- */
- return 1;
-}
-#endif
-
/**
* intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
@@ -957,7 +940,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
}
}
mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock) {
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
@@ -1031,12 +1015,8 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /*
- * Assume LVDS is present if there's an ACPI lid device or if the
- * device is present in the VBT.
- */
- if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
- DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
+ if (!lvds_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@@ -1180,6 +1160,8 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
+ /* keep the LVDS connector */
+ dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc99716..82678d30ab0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
}
/**
- * Don't check status code from this as it switches the bus back to the
- * SDVO chips which defeats the purpose of doing a bus switch in the first
- * place.
+ * Try to read the response after issuing the DDC switch command. Note
+ * that we must read the response and issue the DDC switch command in a
+ * single I2C transaction; otherwise, when we start another I2C
+ * transaction after issuing the DDC bus switch, the bus is switched
+ * back to the internal SDVO register.
*/
static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
u8 target)
{
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ },
+ /* the following two are to read the response */
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = cmd_buf,
+ },
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ret_value,
+ },
+ };
+
+ intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &target, 1);
+ /* write the DDC switch command argument */
+ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
+
+ out_buf[0] = SDVO_I2C_OPCODE;
+ out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
+ cmd_buf[0] = SDVO_I2C_CMD_STATUS;
+ cmd_buf[1] = 0;
+ ret_value[0] = 0;
+ ret_value[1] = 0;
+
+ ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
+ if (ret != 3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return;
+ }
+ if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("DDC switch command returns response %d\n",
+ ret_value[0]);
+ return;
+ }
+ return;
}
static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
edid = drm_get_edid(&intel_output->base,
intel_output->ddc_bus);
+ /* This only applies to SDVO cards with multiple outputs */
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
+ uint8_t saved_ddc, temp_ddc;
+ saved_ddc = sdvo_priv->ddc_bus;
+ temp_ddc = sdvo_priv->ddc_bus >> 1;
+ /*
+ * Don't use 1 as the DDC bus switch argument when fetching
+ * the EDID; that bus is used for the SDVO SPD ROM.
+ */
+ while (temp_ddc > 1) {
+ sdvo_priv->ddc_bus = temp_ddc;
+ edid = drm_get_edid(&intel_output->base,
+ intel_output->ddc_bus);
+ if (edid) {
+ /*
+ * If we can read an EDID, this is probably the
+ * correct DDC bus. Keep it.
+ */
+ sdvo_priv->ddc_bus = temp_ddc;
+ break;
+ }
+ temp_ddc >>= 1;
+ }
+ if (edid == NULL)
+ sdvo_priv->ddc_bus = saved_ddc;
+ }
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
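
Since sdvo_priv->ddc_bus is a one-hot bus mask, halving temp_ddc walks the candidate buses downward, and the > 1 bound skips the SPD ROM slot. For example:

    /* saved_ddc = 0x10 probes 0x08, then 0x04, then 0x02, stopping
     * before 0x01 (the SPD ROM); the first bus that yields an EDID is
     * kept, otherwise saved_ddc is restored. */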
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
+ } else if (flags & SDVO_OUTPUT_CVBS0) {
+
+ sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_priv->is_tv = true;
+ intel_output->needs_tv_clock = true;
+ intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2662,6 +2745,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2792,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOB/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOC/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
if (intel_output->ddc_bus == NULL)
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 97ee566ef74..ddfe16197b5 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -68,7 +68,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d00478dde..c1f877b7bac 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
if (err)
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
}
typedef struct drm_mga_getparam32 {
@@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
&getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
typedef struct drm_mga_drm_bootstrap32 {
@@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
|| __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+ err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
(unsigned long)dma_bootstrap);
if (err)
return err;
@@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
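
drm_ioctl now takes just (file, cmd, arg) and does its own locking, so the lock_kernel()/unlock_kernel() bracketing drops out and drivers hook .unlocked_ioctl instead of .ioctl. The converted wrapper shape, with the handler-table lookup elided:

    long sketch_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg)
    {
            drm_ioctl_compat_t *fn = NULL;

            /* ... look up a 32-bit handler for cmd, if any ... */
            return fn ? (*fn)(filp, cmd, arg)
                      : drm_ioctl(filp, cmd, arg);  /* no BKL needed */
    }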
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d823e631951..1175429da10 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,7 +30,7 @@ config DRM_NOUVEAU_DEBUG
via debugfs.
menu "I2C encoder or helper chips"
- depends on DRM
+ depends on DRM && DRM_KMS_HELPER && I2C
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1d90d4d0144..48c290b5da8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -8,14 +8,15 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_sgdma.o nouveau_dma.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
- nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o \
+ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+ nouveau_dp.o nouveau_grctx.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
+ nv40_grctx.o \
nv04_instmem.o nv50_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a1..48227e74475 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
{
int result;
- if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+ if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
&result))
return -ENODEV;
NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
- if (result & 0x1) { /* Stamina mode - disable the external GPU */
+ if (result) { /* Ensure that the external GPU is enabled */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+ NULL);
+ } else { /* Stamina mode - disable the external GPU */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
NULL);
- } else { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5eec5ed6948..0e9cd1d4913 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -181,43 +181,42 @@ struct methods {
const char desc[8];
void (*loadbios)(struct drm_device *, uint8_t *);
const bool rw;
- int score;
};
static struct methods nv04_methods[] = {
{ "PROM", load_vbios_prom, false },
{ "PRAMIN", load_vbios_pramin, true },
{ "PCIROM", load_vbios_pci, true },
- { }
};
static struct methods nv50_methods[] = {
{ "PRAMIN", load_vbios_pramin, true },
{ "PROM", load_vbios_prom, false },
{ "PCIROM", load_vbios_pci, true },
- { }
};
+#define METHODCNT 3
+
static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct methods *methods, *method;
+ struct methods *methods;
+ int i;
int testscore = 3;
+ int scores[METHODCNT];
if (nouveau_vbios) {
- method = nv04_methods;
- while (method->loadbios) {
- if (!strcasecmp(nouveau_vbios, method->desc))
+ methods = nv04_methods;
+ for (i = 0; i < METHODCNT; i++)
+ if (!strcasecmp(nouveau_vbios, methods[i].desc))
break;
- method++;
- }
- if (method->loadbios) {
+ if (i < METHODCNT) {
NV_INFO(dev, "Attempting to use BIOS image from %s\n",
- method->desc);
+ methods[i].desc);
- method->loadbios(dev, data);
- if (score_vbios(dev, data, method->rw))
+ methods[i].loadbios(dev, data);
+ if (score_vbios(dev, data, methods[i].rw))
return true;
}
@@ -229,28 +228,24 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
else
methods = nv50_methods;
- method = methods;
- while (method->loadbios) {
+ for (i = 0; i < METHODCNT; i++) {
NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
- method->desc);
+ methods[i].desc);
data[0] = data[1] = 0; /* avoid reuse of previous image */
- method->loadbios(dev, data);
- method->score = score_vbios(dev, data, method->rw);
- if (method->score == testscore)
+ methods[i].loadbios(dev, data);
+ scores[i] = score_vbios(dev, data, methods[i].rw);
+ if (scores[i] == testscore)
return true;
- method++;
}
while (--testscore > 0) {
- method = methods;
- while (method->loadbios) {
- if (method->score == testscore) {
+ for (i = 0; i < METHODCNT; i++) {
+ if (scores[i] == testscore) {
NV_TRACE(dev, "Using BIOS image from %s\n",
- method->desc);
- method->loadbios(dev, data);
+ methods[i].desc);
+ methods[i].loadbios(dev, data);
return true;
}
- method++;
}
}
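
Keeping the scores in a local array rather than in the static methods[] tables leaves those shared tables read-only (a guess at the motivation, but consistent with the change). The grading convention assumed here:

    /* score_vbios() is taken to grade an image 0..3, with 3 a perfect
     * (checksummed) match: the first pass returns early on a perfect
     * image, the fallback pass settles for the best partial score. */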
@@ -261,10 +256,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
- int length;
- int length_offset;
- int length_multiplier;
- bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
+ int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
struct bit_entry {
@@ -318,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
struct drm_device *dev = bios->dev;
/* C51 has misaligned regs on purpose. Marvellous */
- if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
- NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
- reg);
- return 0;
- }
- /*
- * Warn on C51 regs that have not been verified accessible in
- * mmiotracing
- */
+ if (reg & 0x2 ||
+ (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+ NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
+
+ /* warn on C51 regs that haven't been verified accessible in tracing */
if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
reg);
- /* Trust the init scripts on G80 */
- if (dev_priv->card_type >= NV_50)
- return 1;
-
- #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
- if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
- return 1;
- if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
- return 1;
- if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
- (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
- WITHIN(reg, 0xc000, 0x48))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
- if (reg == 0x00011014 || reg == 0x00020328)
- return 1;
- if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
- return 1;
+ if (reg >= (8*1024*1024)) {
+ NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
+ return 0;
}
- if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
- return 1;
- if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
- return 1;
- if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
- return 1;
- if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
- WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
- return 1;
- #undef WITHIN
-
- NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
- return 0;
+ return 1;
}
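
The register whitelist is gone; the check above reduces to a warn-only alignment test plus a hard bound at the mapped window. A simplified stand-alone sketch, assuming the same 8 MiB window as the hunk and folding the alignment cases into one test:

#include <stdint.h>
#include <stdio.h>

/* Sketch: warn on misalignment, reject anything past the mapped BAR. */
static int reg_ok(uint32_t reg)
{
	if (reg & 0x3)
		fprintf(stderr, "misaligned reg 0x%08X\n", reg);
	if (reg >= 8u * 1024 * 1024) {
		fprintf(stderr, "reg 0x%08X out of mapped bounds\n", reg);
		return 0;
	}
	return 1;
}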
static bool
@@ -820,7 +771,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
}
}
-static bool
+static int
init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -852,9 +803,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
uint32_t reg = ROM32(bios->data[offset + 7]);
uint8_t config;
uint32_t configval;
+ int len = 11 + count * 4;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
@@ -865,7 +817,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return false;
+ return 0;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -874,10 +826,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, configval);
- return true;
+ return len;
}
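
This is the first handler converted to the new contract: instead of a bool, each op returns the number of bytes its opcode occupies (fixed header plus any per-entry payload), and 0 aborts the table. A sketch of the convention with two hypothetical ops:

#include <stdint.h>

/* Sketch: a fixed-length op always consumes the same number of bytes. */
static int op_fixed(const uint8_t *tbl, uint16_t off)
{
	(void)tbl; (void)off;
	return 5;                        /* 1-byte id + 4 bytes payload */
}

/* Sketch: a counted op derives its length from its own count byte. */
static int op_counted(const uint8_t *tbl, uint16_t off)
{
	uint8_t count = tbl[off + 3];    /* entry count inside the opcode */
	return 4 + count * 4;            /* header + count 32-bit entries */
}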
-static bool
+static int
init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -912,10 +864,10 @@ init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
iexec->repeat = false;
- return true;
+ return 2;
}
-static bool
+static int
init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -951,9 +903,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
uint32_t reg = ROM32(bios->data[offset + 8]);
uint8_t config;
uint16_t freq;
+ int len = 12 + count * 2;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Shift: 0x%02X, IO Flag Condition: 0x%02X, "
@@ -966,7 +919,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return false;
+ return 0;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -986,10 +939,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
setPLL(bios, reg, freq * 10);
- return true;
+ return len;
}
-static bool
+static int
init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1007,12 +960,12 @@ init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* we're not in repeat mode
*/
if (iexec->repeat)
- return false;
+ return 0;
- return true;
+ return 1;
}
-static bool
+static int
init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1041,7 +994,7 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t crtcdata;
if (!iexec->execute)
- return true;
+ return 11;
BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
"Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
@@ -1060,10 +1013,10 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
crtcdata |= (uint8_t)data;
bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
- return true;
+ return 11;
}
-static bool
+static int
init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1079,10 +1032,10 @@ init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
iexec->execute = !iexec->execute;
- return true;
+ return 1;
}
-static bool
+static int
init_io_flag_condition(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1100,7 +1053,7 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
uint8_t cond = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
if (io_flag_condition_met(bios, offset, cond))
BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
@@ -1109,10 +1062,10 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
iexec->execute = false;
}
- return true;
+ return 2;
}
-static bool
+static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1140,11 +1093,12 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
uint32_t mask = ROM32(bios->data[offset + 9]);
uint32_t data = ROM32(bios->data[offset + 13]);
uint8_t count = bios->data[offset + 17];
+ int len = 18 + count * 2;
uint32_t value;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
"Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
@@ -1164,10 +1118,10 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, controlreg, value);
}
- return true;
+ return len;
}
-static bool
+static int
init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1196,25 +1150,26 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
uint8_t shift = bios->data[offset + 5];
uint8_t count = bios->data[offset + 6];
uint32_t reg = ROM32(bios->data[offset + 7]);
+ int len = 11 + count * 4;
uint8_t config;
uint32_t freq;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
offset, crtcport, crtcindex, mask, shift, count, reg);
if (!reg)
- return true;
+ return len;
config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
if (config > count) {
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return false;
+ return 0;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1224,10 +1179,10 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
setPLL(bios, reg, freq);
- return true;
+ return len;
}
-static bool
+static int
init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1244,16 +1199,16 @@ init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t freq = ROM32(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 9;
BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
offset, reg, freq);
setPLL(bios, reg, freq);
- return true;
+ return 9;
}
-static bool
+static int
init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1277,12 +1232,13 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t count = bios->data[offset + 3];
+ int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
"Count: 0x%02X\n",
@@ -1290,7 +1246,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
@@ -1303,7 +1259,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = 1;
msg.buf = &value;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
@@ -1317,14 +1273,14 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = 1;
msg.buf = &value;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
-static bool
+static int
init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1346,12 +1302,13 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t count = bios->data[offset + 3];
+ int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
"Count: 0x%02X\n",
@@ -1359,7 +1316,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
@@ -1374,14 +1331,14 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = 1;
msg.buf = &data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
-static bool
+static int
init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1401,13 +1358,14 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t count = bios->data[offset + 3];
+ int len = 4 + count;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
uint8_t data[256];
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
"Count: 0x%02X\n",
@@ -1415,7 +1373,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1429,13 +1387,13 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
}
- return true;
+ return len;
}
-static bool
+static int
init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1460,7 +1418,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t reg, value;
if (!iexec->execute)
- return true;
+ return 5;
BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
@@ -1468,7 +1426,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return false;
+ return 0;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1476,10 +1434,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, reg + 4, value);
bios_wr32(bios, reg, tmdsaddr);
- return true;
+ return 5;
}
-static bool
+static int
init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1500,18 +1458,19 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
uint8_t mlv = bios->data[offset + 1];
uint8_t count = bios->data[offset + 2];
+ int len = 3 + count * 2;
uint32_t reg;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
offset, mlv, count);
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1521,10 +1480,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, tmdsaddr);
}
- return true;
+ return len;
}
-static bool
+static int
init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1547,11 +1506,12 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
uint8_t crtcindex2 = bios->data[offset + 2];
uint8_t baseaddr = bios->data[offset + 3];
uint8_t count = bios->data[offset + 4];
+ int len = 5 + count;
uint8_t oldaddr, data;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
"BaseAddr: 0x%02X, Count: 0x%02X\n",
@@ -1568,10 +1528,10 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
- return true;
+ return len;
}
-static bool
+static int
init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1592,7 +1552,7 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t value;
if (!iexec->execute)
- return true;
+ return 4;
BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
offset, crtcindex, mask, data);
@@ -1601,10 +1561,10 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
value |= data;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
- return true;
+ return 4;
}
-static bool
+static int
init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1621,14 +1581,14 @@ init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t data = bios->data[offset + 2];
if (!iexec->execute)
- return true;
+ return 3;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
- return true;
+ return 3;
}
-static bool
+static int
init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1645,18 +1605,19 @@ init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t count = bios->data[offset + 1];
+ int len = 2 + count * 2;
int i;
if (!iexec->execute)
- return true;
+ return len;
for (i = 0; i < count; i++)
init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
- return true;
+ return len;
}
-static bool
+static int
init_condition_time(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1680,7 +1641,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
unsigned cnt;
if (!iexec->execute)
- return true;
+ return 3;
if (retries > 100)
retries = 100;
@@ -1711,10 +1672,10 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
iexec->execute = false;
}
- return true;
+ return 3;
}
-static bool
+static int
init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1734,10 +1695,11 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
uint32_t basereg = ROM32(bios->data[offset + 1]);
uint32_t count = bios->data[offset + 5];
+ int len = 6 + count * 4;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
offset, basereg, count);
@@ -1749,10 +1711,10 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, data);
}
- return true;
+ return len;
}
-static bool
+static int
init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1768,7 +1730,7 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint16_t sub_offset = ROM16(bios->data[offset + 1]);
if (!iexec->execute)
- return true;
+ return 3;
BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
offset, sub_offset);
@@ -1777,10 +1739,10 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
- return true;
+ return 3;
}
-static bool
+static int
init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1808,7 +1770,7 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t srcvalue, dstvalue;
if (!iexec->execute)
- return true;
+ return 22;
BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
"Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
@@ -1827,10 +1789,10 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, dstreg, dstvalue | srcvalue);
- return true;
+ return 22;
}
-static bool
+static int
init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1848,14 +1810,14 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t data = bios->data[offset + 4];
if (!iexec->execute)
- return true;
+ return 5;
bios_idxprt_wr(bios, crtcport, crtcindex, data);
- return true;
+ return 5;
}
-static bool
+static int
init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1903,8 +1865,8 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- if (dev_priv->card_type >= NV_50)
- return true;
+ if (dev_priv->card_type >= NV_40)
+ return 1;
/*
* On every card I've seen, this step gets done for us earlier in
@@ -1922,10 +1884,10 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
/* write back the saved configuration value */
bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
- return true;
+ return 1;
}
-static bool
+static int
init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1959,10 +1921,10 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
- return true;
+ return 13;
}
-static bool
+static int
init_configure_mem(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1983,7 +1945,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return false;
+ return 0;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2015,10 +1977,10 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, data);
}
- return true;
+ return 1;
}
-static bool
+static int
init_configure_clk(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2038,7 +2000,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return false;
+ return 0;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2048,10 +2010,10 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
clock *= 2;
setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
- return true;
+ return 1;
}
-static bool
+static int
init_configure_preinit(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2071,15 +2033,15 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
- return false;
+ return 0;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
- return true;
+ return 1;
}
-static bool
+static int
init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2099,7 +2061,7 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t data = bios->data[offset + 4];
if (!iexec->execute)
- return true;
+ return 5;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
offset, crtcport, mask, data);
@@ -2158,15 +2120,15 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
for (i = 0; i < 2; i++)
bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
bios, 0x614108 + (i*0x800)) & 0x0fffffff);
- return true;
+ return 5;
}
bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
data);
- return true;
+ return 5;
}
-static bool
+static int
init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2181,7 +2143,7 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t sub = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
@@ -2191,10 +2153,10 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
- return true;
+ return 2;
}
-static bool
+static int
init_ram_condition(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2215,7 +2177,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
uint8_t data;
if (!iexec->execute)
- return true;
+ return 3;
data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
@@ -2229,10 +2191,10 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
iexec->execute = false;
}
- return true;
+ return 3;
}
-static bool
+static int
init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2251,17 +2213,17 @@ init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t data = ROM32(bios->data[offset + 9]);
if (!iexec->execute)
- return true;
+ return 13;
BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
offset, reg, mask, data);
bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
- return true;
+ return 13;
}
-static bool
+static int
init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2285,7 +2247,7 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
int i;
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
"Count: 0x%02X\n",
@@ -2300,10 +2262,10 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, reg, data);
}
- return true;
+ return 2;
}
-static bool
+static int
init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2315,10 +2277,10 @@ init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
/* mild retval abuse to stop parsing this table */
- return false;
+ return 0;
}
-static bool
+static int
init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2330,15 +2292,15 @@ init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
if (iexec->execute)
- return true;
+ return 1;
iexec->execute = true;
BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
- return true;
+ return 1;
}
-static bool
+static int
init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2353,7 +2315,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
unsigned time = ROM16(bios->data[offset + 1]);
if (!iexec->execute)
- return true;
+ return 3;
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
offset, time);
@@ -2363,10 +2325,10 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
else
msleep((time + 900) / 1000);
- return true;
+ return 3;
}
-static bool
+static int
init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2383,7 +2345,7 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t cond = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
@@ -2394,10 +2356,10 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
iexec->execute = false;
}
- return true;
+ return 2;
}
-static bool
+static int
init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2414,7 +2376,7 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t cond = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
@@ -2425,10 +2387,10 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
iexec->execute = false;
}
- return true;
+ return 2;
}
-static bool
+static int
init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2451,7 +2413,7 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t value;
if (!iexec->execute)
- return true;
+ return 6;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Data: 0x%02X\n",
@@ -2460,10 +2422,10 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
bios_idxprt_wr(bios, crtcport, crtcindex, value);
- return true;
+ return 6;
}
-static bool
+static int
init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2481,16 +2443,16 @@ init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint16_t freq = ROM16(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 7;
BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
setPLL(bios, reg, freq * 10);
- return true;
+ return 7;
}
-static bool
+static int
init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2507,17 +2469,17 @@ init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t value = ROM32(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 9;
if (reg == 0x000200)
value |= 1;
bios_wr32(bios, reg, value);
- return true;
+ return 9;
}
-static bool
+static int
init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2543,14 +2505,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
uint8_t type = bios->data[offset + 1];
uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
+ int len = 2 + bios->ram_restrict_group_count * 4;
int i;
if (!iexec->execute)
- return true;
+ return len;
if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
NV_ERROR(dev, "PLL limits table not version 3.x\n");
- return true; /* deliberate, allow default clocks to remain */
+ return len; /* deliberate, allow default clocks to remain */
}
entry = pll_limits + pll_limits[1];
@@ -2563,15 +2526,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
offset, type, reg, freq);
setPLL(bios, reg, freq);
- return true;
+ return len;
}
}
NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
- return true;
+ return len;
}
-static bool
+static int
init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2581,10 +2544,10 @@ init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
*/
- return true;
+ return 1;
}
-static bool
+static int
init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2594,10 +2557,10 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
*/
- return true;
+ return 1;
}
-static bool
+static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2615,14 +2578,17 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
const uint8_t *gpio_entry;
int i;
+ if (!iexec->execute)
+ return 1;
+
if (bios->bdcb.version != 0x40) {
NV_ERROR(bios->dev, "DCB table not version 4.0\n");
- return false;
+ return 0;
}
if (!bios->bdcb.gpio_table_ptr) {
NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
- return false;
+ return 0;
}
gpio_entry = gpio_table + gpio_table[1];
@@ -2660,13 +2626,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, r, v);
}
- return true;
+ return 1;
}
-/* hack to avoid moving the itbl_entry array before this function */
-int init_ram_restrict_zm_reg_group_blocklen;
-
-static bool
+static int
init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2692,21 +2655,21 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
uint8_t regincrement = bios->data[offset + 5];
uint8_t count = bios->data[offset + 6];
uint32_t strap_ramcfg, data;
- uint16_t blocklen;
+ /* previously set by 'M' BIT table */
+ uint16_t blocklen = bios->ram_restrict_group_count * 4;
+ int len = 7 + count * blocklen;
uint8_t index;
int i;
- /* previously set by 'M' BIT table */
- blocklen = init_ram_restrict_zm_reg_group_blocklen;
if (!iexec->execute)
- return true;
+ return len;
if (!blocklen) {
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
- return false;
+ return 0;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2724,10 +2687,10 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
reg += regincrement;
}
- return true;
+ return len;
}
-static bool
+static int
init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2744,14 +2707,14 @@ init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t dstreg = ROM32(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 9;
bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
- return true;
+ return 9;
}
-static bool
+static int
init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2769,20 +2732,21 @@ init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
uint32_t reg = ROM32(bios->data[offset + 1]);
uint8_t count = bios->data[offset + 5];
+ int len = 6 + count * 4;
int i;
if (!iexec->execute)
- return true;
+ return len;
for (i = 0; i < count; i++) {
uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
bios_wr32(bios, reg, data);
}
- return true;
+ return len;
}
-static bool
+static int
init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2793,10 +2757,10 @@ init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* Seemingly does nothing
*/
- return true;
+ return 1;
}
-static bool
+static int
init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2829,13 +2793,13 @@ init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
val <<= bios->data[offset + 16];
if (!iexec->execute)
- return true;
+ return 17;
bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
- return true;
+ return 17;
}
-static bool
+static int
init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2859,13 +2823,13 @@ init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
val = (val & mask) | ((val + add) & ~mask);
if (!iexec->execute)
- return true;
+ return 13;
bios_wr32(bios, reg, val);
- return true;
+ return 13;
}
-static bool
+static int
init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2883,32 +2847,33 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_device *dev = bios->dev;
struct nouveau_i2c_chan *auxch;
uint32_t addr = ROM32(bios->data[offset + 1]);
- uint8_t len = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count * 2;
int ret, i;
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return false;
+ return 0;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return false;
+ return 0;
}
if (!iexec->execute)
- return true;
+ return len;
offset += 6;
- for (i = 0; i < len; i++, offset += 2) {
+ for (i = 0; i < count; i++, offset += 2) {
uint8_t data;
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return false;
+ return 0;
}
data &= bios->data[offset + 0];
@@ -2917,14 +2882,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
-static bool
+static int
init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2941,106 +2906,99 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_device *dev = bios->dev;
struct nouveau_i2c_chan *auxch;
uint32_t addr = ROM32(bios->data[offset + 1]);
- uint8_t len = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count;
int ret, i;
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return false;
+ return 0;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return false;
+ return 0;
}
if (!iexec->execute)
- return true;
+ return len;
offset += 6;
- for (i = 0; i < len; i++, offset++) {
+ for (i = 0; i < count; i++, offset++) {
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
static struct init_tbl_entry itbl_entry[] = {
/* command name , id , command handler */
/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
- { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog },
- { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat },
- { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll },
- { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat },
- { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy },
- { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not },
- { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition },
- { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched },
- { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 },
- { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 },
- { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte },
- { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte },
- { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c },
- { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds },
- { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group },
- { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch },
- { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr },
- { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr },
- { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group },
- { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time },
- { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence },
+ { "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog },
+ { "INIT_REPEAT" , 0x33, init_repeat },
+ { "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll },
+ { "INIT_END_REPEAT" , 0x36, init_end_repeat },
+ { "INIT_COPY" , 0x37, init_copy },
+ { "INIT_NOT" , 0x38, init_not },
+ { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
+ { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
+ { "INIT_PLL2" , 0x4B, init_pll2 },
+ { "INIT_I2C_BYTE" , 0x4C, init_i2c_byte },
+ { "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte },
+ { "INIT_ZM_I2C" , 0x4E, init_zm_i2c },
+ { "INIT_TMDS" , 0x4F, init_tmds },
+ { "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group },
+ { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch },
+ { "INIT_CR" , 0x52, init_cr },
+ { "INIT_ZM_CR" , 0x53, init_zm_cr },
+ { "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
+ { "INIT_CONDITION_TIME" , 0x56, init_condition_time },
+ { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
- { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct },
- { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg },
- { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io },
- { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem },
- { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset },
- { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem },
- { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk },
- { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit },
- { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io },
- { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub },
- { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition },
- { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg },
- { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro },
- { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done },
- { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume },
+ { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
+ { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
+ { "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
+ { "INIT_RESET" , 0x65, init_reset },
+ { "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem },
+ { "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk },
+ { "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit },
+ { "INIT_IO" , 0x69, init_io },
+ { "INIT_SUB" , 0x6B, init_sub },
+ { "INIT_RAM_CONDITION" , 0x6D, init_ram_condition },
+ { "INIT_NV_REG" , 0x6E, init_nv_reg },
+ { "INIT_MACRO" , 0x6F, init_macro },
+ { "INIT_DONE" , 0x71, init_done },
+ { "INIT_RESUME" , 0x72, init_resume },
/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
- { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time },
- { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition },
- { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition },
- { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io },
- { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll },
- { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg },
- /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */
- { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll },
- { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c },
- { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d },
- { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio },
- /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */
- { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group },
- { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg },
- { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched },
- { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved },
- { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 },
- { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 },
- { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch },
- { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch },
- { NULL , 0 , 0 , 0 , 0 , NULL }
+ { "INIT_TIME" , 0x74, init_time },
+ { "INIT_CONDITION" , 0x75, init_condition },
+ { "INIT_IO_CONDITION" , 0x76, init_io_condition },
+ { "INIT_INDEX_IO" , 0x78, init_index_io },
+ { "INIT_PLL" , 0x79, init_pll },
+ { "INIT_ZM_REG" , 0x7A, init_zm_reg },
+ { "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll },
+ { "INIT_8C" , 0x8C, init_8c },
+ { "INIT_8D" , 0x8D, init_8d },
+ { "INIT_GPIO" , 0x8E, init_gpio },
+ { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group },
+ { "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg },
+ { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
+ { "INIT_RESERVED" , 0x92, init_reserved },
+ { "INIT_96" , 0x96, init_96 },
+ { "INIT_97" , 0x97, init_97 },
+ { "INIT_AUXCH" , 0x98, init_auxch },
+ { "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
+ { NULL , 0 , NULL }
};
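
A sketch of how a handler might be looked up in a sentinel-terminated table like the one above; the struct is a simplified stand-in, not the driver's own:

#include <stdint.h>
#include <stddef.h>

struct op {
	const char *name;
	uint8_t id;
	int (*handler)(uint16_t offset);
};

static const struct op *find_op(const struct op *tbl, uint8_t id)
{
	for (; tbl->name; tbl++)         /* { NULL, 0, NULL } terminates */
		if (tbl->id == id)
			return tbl;
	return NULL;
}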
-static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
-{
- /* Calculates the length of a given init table entry. */
- return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier;
-}
-
#define MAX_TABLE_OPS 1000
static int
@@ -3056,7 +3014,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
- int count = 0, i;
+ int count = 0, i, res;
uint8_t id;
/*
@@ -3076,22 +3034,21 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
offset, itbl_entry[i].id, itbl_entry[i].name);
/* execute eventual command handler */
- if (itbl_entry[i].handler)
- if (!(*itbl_entry[i].handler)(bios, offset, iexec))
- break;
+ res = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (!res)
+ break;
+ /*
+ * Advance the offset past the current command, including all of
+ * its data, so that it then points at the next opcode.
+ */
+ offset += res;
} else {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
-
- /*
- * Add the offset of the current command including all data
- * of that command. The offset will then be pointing on the
- * next op code.
- */
- offset += get_init_table_entry_length(bios, offset, i);
}
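
With handlers reporting their own lengths, the dispatch loop just adds the return value to the offset. A self-contained sketch of that loop, with illustrative types and a -1 error code standing in for -ENOENT:

#include <stdint.h>

#define MAX_TABLE_OPS 1000

struct optbl {
	uint8_t id;
	int (*handler)(const uint8_t *data, uint16_t offset);
};

static int parse(const struct optbl *tbl, int nops,
		 const uint8_t *data, uint16_t offset)
{
	int count = 0;

	while (count++ < MAX_TABLE_OPS) {
		int i, len;

		for (i = 0; i < nops; i++)
			if (tbl[i].id == data[offset])
				break;
		if (i == nops)
			return -1;               /* unknown opcode */
		len = tbl[i].handler(data, offset);
		if (!len)
			break;                   /* INIT_DONE or failure */
		offset += len;                   /* step to next opcode */
	}
	return 0;
}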
if (offset >= bios->length)
@@ -3198,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
- if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_PANEL_ON) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
- }
- if (script == LVDS_PANEL_OFF) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ if ((dev->pci_device & 0xffff) == 0x0179 ||
+ (dev->pci_device & 0xffff) == 0x0189 ||
+ (dev->pci_device & 0xffff) == 0x0329) {
+ if (script == LVDS_RESET) {
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+
+ } else if (script == LVDS_PANEL_ON) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
+ } else if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
}
}
#endif
@@ -3799,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct init_exec iexec = {true, false};
struct nvbios *bios = &dev_priv->VBIOS;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
@@ -3854,7 +3819,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
* script tables is a pointer to the script to execute.
*/
- NV_DEBUG(dev, "Searching for output entry for %d %d %d\n",
+ NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
otable = bios_output_config_match(dev, dcbent, table[1] +
bios->display.script_table_ptr,
@@ -3879,27 +3844,25 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
}
- bios->display.output = dcbent;
-
if (pxclk == 0) {
script = ROM16(otable[6]);
if (!script) {
- NV_DEBUG(dev, "output script 0 not found\n");
+ NV_DEBUG_KMS(dev, "output script 0 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
script = ROM16(otable[8]);
if (!script) {
- NV_DEBUG(dev, "output script 1 not found\n");
+ NV_DEBUG_KMS(dev, "output script 1 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
if (table[4] >= 12)
@@ -3907,12 +3870,12 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
else
script = 0;
if (!script) {
- NV_DEBUG(dev, "output script 2 not found\n");
+ NV_DEBUG_KMS(dev, "output script 2 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3924,19 +3887,19 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
if (script)
script = clkcmptable(bios, script, -pxclk);
if (!script) {
- NV_DEBUG(dev, "clock script 1 not found\n");
+ NV_DEBUG_KMS(dev, "clock script 1 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
}
return 0;
@@ -4606,10 +4569,6 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
* stuff that we don't use - their use currently unknown
*/
- uint16_t rr_strap_xlat;
- uint8_t rr_group_count;
- int i;
-
/*
* Older bios versions don't have a sufficiently long table for
* what we want
@@ -4618,24 +4577,13 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
if (bitentry->id[1] < 2) {
- rr_group_count = bios->data[bitentry->offset + 2];
- rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]);
+ bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
+ bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
} else {
- rr_group_count = bios->data[bitentry->offset + 0];
- rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]);
+ bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
+ bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
}
- /* adjust length of INIT_87 */
- for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
- itbl_entry[i].length += rr_group_count * 4;
-
- /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
- for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
- itbl_entry[i].length_multiplier = rr_group_count * 4;
-
- init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
- bios->ram_restrict_tbl_ptr = rr_strap_xlat;
-
return 0;
}
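
Instead of patching opcode lengths into the table at parse time, the group count is now stored on the bios struct and the two RAM-restrict ops derive their lengths on demand. A sketch of those two length formulas, matching the hunks above:

#include <stdint.h>

struct bios_state { uint8_t ram_restrict_group_count; };

/* INIT_RAM_RESTRICT_PLL: 2-byte header + one 32-bit freq per group. */
static int init_ram_restrict_pll_len(const struct bios_state *b)
{
	return 2 + b->ram_restrict_group_count * 4;
}

/* INIT_RAM_RESTRICT_ZM_REG_GROUP: 7-byte header + count blocks of
 * (group_count * 4) bytes each -- the blocklen computed above. */
static int init_ram_restrict_zm_reg_group_len(const struct bios_state *b,
					      uint8_t count)
{
	return 7 + count * (b->ram_restrict_group_count * 4);
}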
@@ -5234,7 +5182,7 @@ parse_dcb_connector_table(struct nvbios *bios)
int i;
if (!bios->bdcb.connector_table_ptr) {
- NV_DEBUG(dev, "No DCB connector table present\n");
+ NV_DEBUG_KMS(dev, "No DCB connector table present\n");
return;
}
@@ -5451,52 +5399,49 @@ static bool
parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
- if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
- conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
- conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
- conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
- conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
- conn != 0xf2205004 && conn != 0xf2209004) {
- NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
-
- /* cause output setting to fail for !TV, so message is seen */
- if ((conn & 0xf) != 0x1)
- dcb->entries = 0;
-
- return false;
- }
- /* most of the below is a "best guess" atm */
- entry->type = conn & 0xf;
- if (entry->type == 2)
- /* another way of specifying straps based lvds... */
+ switch (conn & 0x0000000f) {
+ case 0:
+ entry->type = OUTPUT_ANALOG;
+ break;
+ case 1:
+ entry->type = OUTPUT_TV;
+ break;
+ case 2:
+ case 3:
entry->type = OUTPUT_LVDS;
- if (entry->type == 4) { /* digital */
- if (conn & 0x10)
- entry->type = OUTPUT_LVDS;
- else
+ break;
+ case 4:
+ switch ((conn & 0x000000f0) >> 4) {
+ case 0:
entry->type = OUTPUT_TMDS;
+ break;
+ case 1:
+ entry->type = OUTPUT_LVDS;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
+ (conn & 0x000000f0) >> 4);
+ return false;
+ }
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+ return false;
}
- /* what's in bits 5-13? could be some encoder maker thing, in tv case */
- entry->i2c_index = (conn >> 14) & 0xf;
- /* raw heads field is in range 0-1, so move to 1-2 */
- entry->heads = ((conn >> 18) & 0x7) + 1;
- entry->location = (conn >> 21) & 0xf;
- /* unused: entry->bus = (conn >> 25) & 0x7; */
- /* set or to be same as heads -- hopefully safe enough */
- entry->or = entry->heads;
+
+ entry->i2c_index = (conn & 0x0003c000) >> 14;
+ entry->heads = ((conn & 0x001c0000) >> 18) + 1;
+ entry->or = entry->heads; /* same as heads, hopefully safe enough */
+ entry->location = (conn & 0x01e00000) >> 21;
+ entry->bus = (conn & 0x0e000000) >> 25;
entry->duallink_possible = false;
switch (entry->type) {
case OUTPUT_ANALOG:
entry->crtconf.maxfreq = (conf & 0xffff) * 10;
break;
- case OUTPUT_LVDS:
- /*
- * This is probably buried in conn's unknown bits.
- * This will upset EDID-ful models, if they exist
- */
- entry->lvdsconf.use_straps_for_mode = true;
- entry->lvdsconf.use_power_scripts = true;
+ case OUTPUT_TV:
+ entry->tvconf.has_component_output = false;
break;
case OUTPUT_TMDS:
/*
@@ -5505,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
*/
fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
break;
- case OUTPUT_TV:
- entry->tvconf.has_component_output = false;
+ case OUTPUT_LVDS:
+ if ((conn & 0x00003f00) != 0x10)
+ entry->lvdsconf.use_straps_for_mode = true;
+ entry->lvdsconf.use_power_scripts = true;
+ break;
+ default:
break;
}
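
The switch above decodes the DCB 1.5 connection word field by field. A sketch collecting those mask/shift decodes in one place (field layout as in the hunk; heads moves from the raw 0-1 range to 1-2):

#include <stdint.h>

struct dcb15 {
	unsigned type, subtype, i2c_index, heads, location, bus;
};

static struct dcb15 decode_conn(uint32_t conn)
{
	struct dcb15 e;

	e.type      =  conn & 0x0000000f;                /* output type */
	e.subtype   = (conn & 0x000000f0) >> 4;          /* digital subtype */
	e.i2c_index = (conn & 0x0003c000) >> 14;
	e.heads     = ((conn & 0x001c0000) >> 18) + 1;   /* 0-1 -> 1-2 */
	e.location  = (conn & 0x01e00000) >> 21;
	e.bus       = (conn & 0x0e000000) >> 25;
	return e;
}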
@@ -5581,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
dcb->entries = newentries;
}
-static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct bios_parsed_dcb *bdcb = &bios->bdcb;
struct parsed_dcb *dcb;
- uint16_t dcbptr, i2ctabptr = 0;
+ uint16_t dcbptr = 0, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
bool configblock = true;
@@ -5596,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
dcb->entries = 0;
/* get the offset from 0x36 */
- dcbptr = ROM16(bios->data[0x36]);
+ if (dev_priv->card_type > NV_04) {
+ dcbptr = ROM16(bios->data[0x36]);
+ if (dcbptr == 0x0000)
+ NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+ }
+ /* this situation likely means a really old card, pre DCB */
if (dcbptr == 0x0) {
- NV_WARN(dev, "No output data (DCB) found in BIOS, "
- "assuming a CRT output exists\n");
- /* this situation likely means a really old card, pre DCB */
+ NV_INFO(dev, "Assuming a CRT output exists\n");
fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
- if (nv04_tv_identify(dev,
- bios->legacy.i2c_indices.tv) >= 0)
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_tv_output(dcb, twoHeads);
return 0;
@@ -5909,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->VBIOS;
struct init_exec iexec = { true, false };
+ mutex_lock(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
+ mutex_unlock(&bios->lock);
}
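
The new mutex serializes whole table parses so bios->display.output cannot change under a concurrent caller. A user-space analogue of the idea, with pthreads standing in for the kernel's mutex API:

#include <pthread.h>

struct bios { pthread_mutex_t lock; const void *output; };

static void run_init_table(struct bios *b, const void *dcbent)
{
	pthread_mutex_lock(&b->lock);
	b->output = dcbent;
	/* ... parse the init table here ... */
	b->output = NULL;
	pthread_mutex_unlock(&b->lock);
}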
static bool NVInitVBIOS(struct drm_device *dev)
@@ -5920,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->VBIOS;
memset(bios, 0, sizeof(struct nvbios));
+ mutex_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 1d5f10bd78e..fd94bd6dc26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
struct drm_device *dev;
struct nouveau_bios_info pub;
+ struct mutex lock;
+
uint8_t data[NV_PROM_SIZE];
unsigned int length;
bool execute;
@@ -227,6 +229,7 @@ struct nvbios {
uint16_t pll_limit_tbl_ptr;
uint16_t ram_restrict_tbl_ptr;
+ uint8_t ram_restrict_group_count;
uint16_t some_script_ptr; /* BIT I + 14 */
uint16_t init96_tbl_ptr; /* BIT I + 16 */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 320a14bceb9..028719fddf7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -33,10 +33,13 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include <linux/log2.h>
+
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
+ if (nvbo->tile)
+ nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
+
spin_lock(&dev_priv->ttm.bo_list_lock);
list_del(&nvbo->head);
spin_unlock(&dev_priv->ttm.bo_list_lock);
kfree(nvbo);
}
+static void
+nouveau_bo_fixup_align(struct drm_device *dev,
+ uint32_t tile_mode, uint32_t tile_flags,
+ int *align, int *size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * Some of the tile_flags have a periodic structure of N*4096 bytes,
+ * align to that as well as the page size. Align the size to the
+ * appropriate boundaries. This does imply that sizes are rounded up
+ * by 3-7 pages, so be aware of this and do not waste memory by
+ * allocating many small buffers.
+ */
+ if (dev_priv->card_type == NV_50) {
+ uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
+ int i;
+
+ switch (tile_flags) {
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7a00:
+ if (is_power_of_2(block_size)) {
+ for (i = 1; i < 10; i++) {
+ *align = 12 * i * block_size;
+ if (!(*align % 65536))
+ break;
+ }
+ } else {
+ for (i = 1; i < 10; i++) {
+ *align = 8 * i * block_size;
+ if (!(*align % 65536))
+ break;
+ }
+ }
+ *size = roundup(*size, *align);
+ break;
+ default:
+ break;
+ }
+
+ } else {
+ if (tile_mode) {
+ if (dev_priv->chipset >= 0x40) {
+ *align = 65536;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x30) {
+ *align = 32768;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x20) {
+ *align = 16384;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x10) {
+ *align = 16384;
+ *size = roundup(*size, 32 * tile_mode);
+ }
+ }
+ }
+
+ /* ALIGN() works only on powers of two, hence roundup(). */
+ *size = roundup(*size, PAGE_SIZE);
+
+ if (dev_priv->card_type == NV_50) {
+ *size = roundup(*size, 65536);
+ *align = max(65536, *align);
+ }
+}
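
A stand-alone sketch of the rounding this helper ends with: page-align the size always, then round size and alignment up to 64 KiB on NV50-class hardware. round_up_to() is a local stand-in for the kernel's roundup():

#include <stdint.h>

static uint64_t round_up_to(uint64_t v, uint64_t m)
{
	return ((v + m - 1) / m) * m;
}

static void fixup_align(int nv50, uint64_t *size, uint64_t *align,
			uint64_t page_size)
{
	*size = round_up_to(*size, page_size);   /* always page aligned */
	if (nv50) {
		*size = round_up_to(*size, 65536);
		if (*align < 65536)
			*align = 65536;
	}
}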
+
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret, n = 0;
+ int ret = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
- /*
- * Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Overallocate memory to
- * avoid corruption of other buffer objects.
- */
- switch (tile_flags) {
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7a00:
- if (dev_priv->chipset >= 0xA0) {
- /* This is based on high end cards with 448 bits
- * memory bus, could be different elsewhere.*/
- size += 6 * 28672;
- /* 8 * 28672 is the actual alignment requirement,
- * but we must also align to page size. */
- align = 2 * 8 * 28672;
- } else if (dev_priv->chipset >= 0x90) {
- size += 3 * 16384;
- align = 12 * 16834;
- } else {
- size += 3 * 8192;
- /* 12 * 8192 is the actual alignment requirement,
- * but we must also align to page size. */
- align = 2 * 12 * 8192;
- }
- break;
- default:
- break;
- }
-
+ nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
align >>= PAGE_SHIFT;
- size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- if (dev_priv->card_type == NV_50) {
- size = (size + 65535) & ~65535;
- if (align < (65536 / PAGE_SIZE))
- align = (65536 / PAGE_SIZE);
- }
-
- if (flags & TTM_PL_FLAG_VRAM)
- nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
- if (flags & TTM_PL_FLAG_TT)
- nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
- nvbo->placement.placement = nvbo->placements;
- nvbo->placement.busy_placement = nvbo->placements;
- nvbo->placement.num_placement = n;
- nvbo->placement.num_busy_placement = n;
+ nouveau_bo_placement_set(nvbo, flags);
nvbo->channel = chan;
- nouveau_bo_placement_set(nvbo, flags);
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
@@ -154,6 +187,11 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
nvbo->placement.busy_placement = nvbo->placements;
nvbo->placement.num_placement = n;
nvbo->placement.num_busy_placement = n;
+
+ if (nvbo->pin_refcnt) {
+ while (n--)
+ nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
+ }
}
int
@@ -311,8 +349,10 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
struct drm_device *dev = dev_priv->dev;
switch (dev_priv->gart_info.type) {
+#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
+#endif
case NOUVEAU_GART_SGDMA:
return nouveau_sgdma_init_ttm(dev);
default:
@@ -398,16 +438,23 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
struct nouveau_bo *nvbo = nouveau_bo(bo);
switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_SYSTEM);
+ break;
default:
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
break;
}
+
+ *pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
* TTM_PL_{VRAM,TT} directly.
*/
+
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -422,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
evict, no_wait, new_mem);
+ if (nvbo->channel && nvbo->channel != chan)
+ ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
return ret;
}
@@ -442,22 +491,20 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ int no_wait, struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
uint64_t src_offset, dst_offset;
uint32_t page_count;
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->tile_flags || nvbo->no_vm) {
+ if (!chan || nvbo->tile_flags || nvbo->no_vm)
chan = dev_priv->channel;
- if (!chan)
- return -EINVAL;
- }
src_offset = old_mem->mm_node->start << PAGE_SHIFT;
dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
@@ -537,7 +584,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = &placement_memtype;
+ placement.placement = placement.busy_placement = &placement_memtype;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -549,7 +596,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
if (ret)
goto out;
@@ -575,7 +622,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = &placement_memtype;
+ placement.placement = placement.busy_placement = &placement_memtype;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -587,7 +634,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
if (ret)
goto out;
@@ -602,51 +649,106 @@ out:
}
static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+ struct nouveau_tile_reg **new_tile)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_device *dev = dev_priv->dev;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ uint64_t offset;
int ret;
- if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
- !nvbo->no_vm) {
- uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+ /* Nothing to do. */
+ *new_tile = NULL;
+ return 0;
+ }
+
+ offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
new_mem->size, nvbo->tile_flags,
offset);
if (ret)
return ret;
+
+ } else if (dev_priv->card_type >= NV_10) {
+ *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+ nvbo->tile_mode);
}
- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+ struct nouveau_tile_reg *new_tile,
+ struct nouveau_tile_reg **old_tile)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ if (dev_priv->card_type >= NV_10 &&
+ dev_priv->card_type < NV_50) {
+ if (*old_tile)
+ nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
+ *old_tile = new_tile;
+ }
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_tile_reg *new_tile = NULL;
+ int ret = 0;
+
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+
+ /* Software copy if the card isn't up and running yet. */
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
+ !dev_priv->channel) {
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ goto out;
+ }
+
+ /* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
bo->mem = *new_mem;
new_mem->mm_node = NULL;
- return 0;
+ goto out;
}
- if (new_mem->mem_type == TTM_PL_SYSTEM) {
- if (old_mem->mem_type == TTM_PL_SYSTEM)
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
- if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else {
- if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- }
+ /* Hardware assisted copy. */
+ if (new_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ else
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
- return 0;
+ if (!ret)
+ goto out;
+
+ /* Fallback to software copy. */
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+out:
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+
+ return ret;
}
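For reference, a hedged summary of the tile bookkeeping in the rewritten move path, as read from the code above (not part of the patch):

/* Success: nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile) schedules the
 * bo's previous tile region to expire on bo->sync_obj and records new_tile
 * as current.  Failure: nouveau_bo_vm_cleanup(bo, NULL, &new_tile) expires
 * the region nouveau_bo_vm_bind() just set up, leaving nvbo->tile intact.
 * Either way this only applies on NV10..NV4x; the NV50 path instead uses
 * the linear VM binding performed in nouveau_bo_vm_bind(). */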
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9aaa972f882..2281f99da7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
+ nouveau_dma_pre_init(chan);
+
/* Locate channel's user control regs */
if (dev_priv->card_type < NV_40)
user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return 0;
}
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t caches;
- int idle;
-
- if (!chan) {
- NV_ERROR(dev, "no channel...\n");
- return 1;
- }
-
- caches = nv_rd32(dev, NV03_PFIFO_CACHES);
- nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
-
- if (engine->fifo.channel_id(dev) != chan->id) {
- struct nouveau_gpuobj *ramfc =
- chan->ramfc ? chan->ramfc->gpuobj : NULL;
-
- if (!ramfc) {
- NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
- return 1;
- }
-
- engine->instmem.prepare_access(dev, false);
- if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
- idle = 0;
- else
- idle = 1;
- engine->instmem.finish_access(dev);
- } else {
- idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, caches);
- return idle;
-}
-
/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
@@ -317,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
/* Ensure the channel is no longer active on the GPU */
pfifo->reassign(dev, false);
- if (pgraph->channel(dev) == chan) {
- pgraph->fifo_access(dev, false);
+ pgraph->fifo_access(dev, false);
+ if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
- pgraph->fifo_access(dev, true);
- }
pgraph->destroy_context(chan);
+ pgraph->fifo_access(dev, true);
if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
@@ -414,7 +374,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
init->subchan[0].grclass = 0x0039;
else
init->subchan[0].grclass = 0x5039;
- init->nr_subchan = 1;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 032cf098fa1..d2f63353ea9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
*
*/
+#include <acpi/button.h>
+
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
+
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
static void
nouveau_connector_destroy(struct drm_connector *drm_connector)
{
- struct nouveau_connector *connector = nouveau_connector(drm_connector);
- struct drm_device *dev = connector->base.dev;
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(drm_connector);
+ struct drm_device *dev;
- NV_DEBUG(dev, "\n");
-
- if (!connector)
+ if (!nv_connector)
return;
+ dev = nv_connector->base.dev;
+ NV_DEBUG_KMS(dev, "\n");
+
+ kfree(nv_connector->edid);
drm_sysfs_connector_remove(drm_connector);
drm_connector_cleanup(drm_connector);
kfree(drm_connector);
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (nv_encoder && nv_connector->native_mode) {
+#ifdef CONFIG_ACPI
+ if (!nouveau_ignorelid && !acpi_lid_open())
+ return connector_status_disconnected;
+#endif
nouveau_connector_set_encoder(connector, nv_encoder);
return connector_status_connected;
}
+ /* Cleanup the previous EDID block. */
+ if (nv_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ kfree(nv_connector->edid);
+ nv_connector->edid = NULL;
+ }
+
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector)
if (!nv_connector->edid) {
NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
drm_get_connector_name(connector));
- return connector_status_disconnected;
+ goto detect_analog;
}
if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector)
return connector_status_connected;
}
+detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -420,7 +438,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
/* Use preferred mode if there is one.. */
list_for_each_entry(mode, &connector->base.probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- NV_DEBUG(dev, "native mode from preferred\n");
+ NV_DEBUG_KMS(dev, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
}
@@ -445,7 +463,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
largest = mode;
}
- NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
+ NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
high_w, high_h, high_v);
return largest ? drm_mode_duplicate(dev, largest) : NULL;
}
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
*/
if (!nv_connector->edid && !nv_connector->native_mode &&
!dev_priv->VBIOS.pub.fp_no_ddc) {
- nv_connector->edid =
+ struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
+ if (edid) {
+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (nv_connector->edid)
+ *(nv_connector->edid) = *edid;
+ }
}
if (!nv_connector->edid)
@@ -725,7 +747,7 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
struct drm_encoder *encoder;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 703553687b2..50d9e67745a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -29,12 +29,22 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+void
+nouveau_dma_pre_init(struct nouveau_channel *chan)
+{
+ chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ chan->dma.put = 0;
+ chan->dma.cur = chan->dma.put;
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+}
+
int
nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *m2mf = NULL;
+ struct nouveau_gpuobj *nvsw = NULL;
int ret, i;
/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+ if (ret)
+ return ret;
+
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
}
- /* Initialise DMA vars */
- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
- chan->dma.put = 0;
- chan->dma.cur = chan->dma.put;
- chan->dma.free = chan->dma.max - chan->dma.cur;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
OUT_RING(chan, NvNotify0);
+ /* Initialise NV_SW */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
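With NV_SW bound to NvSubSw here, fence sequence writes move off the M2MF subchannel. A minimal sketch of an emit, mirroring the nouveau_fence.c hunk later in this patch (USE_REFCNT and the method offsets are taken from that hunk):

ret = RING_SPACE(chan, 2);
if (ret == 0) {
	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);
}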
@@ -106,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
chan->dma.cur += nr_dwords;
}
-static inline bool
-READ_GET(struct nouveau_channel *chan, uint32_t *get)
+/* Fetch and adjust GPU GET pointer
+ *
+ * Returns:
+ * value >= 0, the adjusted GET pointer
+ * -EINVAL if GET pointer currently outside main push buffer
+ * -EBUSY if timeout exceeded
+ */
+static inline int
+READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
uint32_t val;
val = nvchan_rd32(chan, chan->user_get);
- if (val < chan->pushbuf_base ||
- val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
- /* meaningless to dma_wait() except to know whether the
- * GPU has stalled or not
- */
- *get = val;
- return false;
+
+ /* reset counter as long as GET is still advancing, this is
+ * to avoid misdetecting a GPU lockup if the GPU happens to
+ * just be processing an operation that takes a long time
+ */
+ if (val != *prev_get) {
+ *prev_get = val;
+ *timeout = 0;
+ }
+
+ if ((++*timeout & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (*timeout > 100000)
+ return -EBUSY;
}
- *get = (val - chan->pushbuf_base) >> 2;
- return true;
+ if (val < chan->pushbuf_base ||
+ val > chan->pushbuf_base + (chan->dma.max << 2))
+ return -EINVAL;
+
+ return (val - chan->pushbuf_base) >> 2;
}
int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
- uint32_t get, prev_get = 0, cnt = 0;
- bool get_valid;
+ uint32_t prev_get = 0, cnt = 0;
+ int get;
while (chan->dma.free < size) {
- /* reset counter as long as GET is still advancing, this is
- * to avoid misdetecting a GPU lockup if the GPU happens to
- * just be processing an operation that takes a long time
- */
- get_valid = READ_GET(chan, &get);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- DRM_UDELAY(1);
- if (cnt > 100000)
- return -EBUSY;
- }
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
/* loop until we have a usable GET pointer. the value
* we read from the GPU may be outside the main ring if
@@ -157,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
* from the SKIPS area, so the code below doesn't have to deal
* with some fun corner cases.
*/
- if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+ if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
continue;
if (get <= chan->dma.cur) {
@@ -183,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
* after processing the currently pending commands.
*/
OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+
+ /* wait for GET to depart from the skips area.
+ * prevents writing GET==PUT and causing a race
+ * condition that causes us to think the GPU is
+ * idle when it's not.
+ */
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
+ if (unlikely(get == -EINVAL))
+ continue;
+ } while (get <= NOUVEAU_DMA_SKIPS);
WRITE_PUT(NOUVEAU_DMA_SKIPS);
/* we're now submitting commands at the start of
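The poll budget in the new READ_GET() is easy to misread; this small userspace model (illustrative only) mirrors the counter/udelay logic and shows how long the loop spins before giving up, ignoring the cost of the register reads themselves:

#include <stdio.h>

int main(void)
{
	unsigned int timeout = 0, delays = 0;

	for (;;) {
		/* same masking as READ_GET(): one udelay per 256 polls */
		if ((++timeout & 0xff) == 0) {
			delays++;		/* stands in for DRM_UDELAY(1) */
			if (timeout > 100000)
				break;		/* READ_GET() returns -EBUSY */
		}
	}
	printf("%u polls, ~%u us of udelay before -EBUSY\n", timeout, delays);
	return 0;
}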
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 04e85d8f757..dabfd655f93 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -46,10 +46,11 @@
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
NvSubM2MF = 0,
- NvSub2D = 1,
- NvSubCtxSurf2D = 1,
- NvSubGdiRect = 2,
- NvSubImageBlit = 3
+ NvSubSw = 1,
+ NvSub2D = 2,
+ NvSubCtxSurf2D = 2,
+ NvSubGdiRect = 3,
+ NvSubImageBlit = 4
};
/* Object handles. */
@@ -67,6 +68,7 @@ enum {
NvClipRect = 0x8000000b,
NvGdiRect = 0x8000000c,
NvImageBlit = 0x8000000d,
+ NvSw = 0x8000000e,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de61f4640e1..f954ad93e81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -187,7 +187,7 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
if (ret)
return false;
- NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
+ NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
/* Keep all lanes at the same level.. */
for (i = 0; i < nv_encoder->dp.link_nr; i++) {
@@ -228,7 +228,7 @@ nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
int dpe_headerlen, ret, i;
- NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
config[0], config[1], config[2], config[3]);
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
@@ -276,12 +276,12 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
bool cr_done, cr_max_vs, eq_done;
int ret = 0, i, tries, voltage;
- NV_DEBUG(dev, "link training!!\n");
+ NV_DEBUG_KMS(dev, "link training!!\n");
train:
cr_done = eq_done = false;
/* set link configuration */
- NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
+ NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
@@ -297,7 +297,7 @@ train:
return false;
/* clock recovery */
- NV_DEBUG(dev, "\tbegin cr\n");
+ NV_DEBUG_KMS(dev, "\tbegin cr\n");
ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
if (ret)
goto stop;
@@ -314,7 +314,7 @@ train:
ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
if (ret)
break;
- NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
status[0], status[1]);
cr_done = true;
@@ -346,7 +346,7 @@ train:
goto stop;
/* channel equalisation */
- NV_DEBUG(dev, "\tbegin eq\n");
+ NV_DEBUG_KMS(dev, "\tbegin eq\n");
ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
if (ret)
goto stop;
@@ -357,7 +357,7 @@ train:
ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
if (ret)
break;
- NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
status[0], status[1]);
eq_done = true;
@@ -395,9 +395,9 @@ stop:
/* retry at a lower setting, if possible */
if (!ret && !(eq_done && cr_done)) {
- NV_DEBUG(dev, "\twe failed\n");
+ NV_DEBUG_KMS(dev, "\twe failed\n");
if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
- NV_DEBUG(dev, "retry link training at low rate\n");
+ NV_DEBUG_KMS(dev, "retry link training at low rate\n");
nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
goto train;
}
@@ -418,7 +418,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
if (ret)
return false;
- NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
+ NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
"display: link_bw %d, link_nr %d version 0x%02x\n",
nv_encoder->dcb->dpconf.link_bw,
nv_encoder->dcb->dpconf.link_nr,
@@ -446,7 +446,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint32_t tmp, ctrl, stat = 0, data32[4] = {};
int ret = 0, i, index = auxch->rd;
- NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
+ NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
@@ -472,7 +472,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
if (!(cmd & 1)) {
memcpy(data32, data, data_nr);
for (i = 0; i < 4; i++) {
- NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
+ NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
}
}
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
nv_rd32(dev, NV50_AUXCH_CTRL(index)));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
udelay(400);
@@ -502,9 +503,14 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
}
if (cmd & 1) {
+ if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
for (i = 0; i < 4; i++) {
data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
- NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
+ NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
}
memcpy(data, data32, data_nr);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 35249c35118..da3b93b8450 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,6 +35,10 @@
#include "drm_pciids.h"
+MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
+int nouveau_ctxfw = 0;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
MODULE_PARM_DESC(noagp, "Disable AGP");
int nouveau_noagp;
module_param_named(noagp, nouveau_noagp, int, 0400);
@@ -52,7 +56,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -67,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
int nouveau_uscript_tmds = -1;
module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -273,7 +289,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
chan = dev_priv->fifos[i];
- if (!chan)
+ if (!chan || !chan->pushbuf_bo)
continue;
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
@@ -341,7 +357,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 88b4c7b77e7..1c15ef37b71 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,15 +54,24 @@ struct nouveau_fpriv {
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
+struct nouveau_grctx;
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_TILE_NR 15
#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_tile_reg {
+ struct nouveau_fence *fence;
+ uint32_t addr;
+ uint32_t size;
+ bool used;
+};
+
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
@@ -82,6 +91,7 @@ struct nouveau_bo {
uint32_t tile_mode;
uint32_t tile_flags;
+ struct nouveau_tile_reg *tile;
struct drm_gem_object *gem;
struct drm_file *cpu_filp;
@@ -276,8 +286,13 @@ struct nouveau_timer_engine {
};
struct nouveau_fb_engine {
+ int num_tiles;
+
int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
+
+ void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch);
};
struct nouveau_fifo_engine {
@@ -291,6 +306,8 @@ struct nouveau_fifo_engine {
void (*disable)(struct drm_device *);
void (*enable)(struct drm_device *);
bool (*reassign)(struct drm_device *, bool enable);
+ bool (*cache_flush)(struct drm_device *dev);
+ bool (*cache_pull)(struct drm_device *dev, bool enable);
int (*channel_id)(struct drm_device *);
@@ -317,6 +334,7 @@ struct nouveau_pgraph_engine {
bool accel_blocked;
void *ctxprog;
void *ctxvals;
+ int grctx_size;
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -328,6 +346,9 @@ struct nouveau_pgraph_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+
+ void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch);
};
struct nouveau_engine {
@@ -488,6 +509,8 @@ struct drm_nouveau_private {
void __iomem *ramin;
uint32_t ramin_size;
+ struct nouveau_bo *vga_ram;
+
struct workqueue_struct *wq;
struct work_struct irq_work;
@@ -546,6 +569,12 @@ struct drm_nouveau_private {
unsigned long sg_handle;
} gart_info;
+ /* nv10-nv40 tiling regions */
+ struct {
+ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+ spinlock_t lock;
+ } tile;
+
/* G8x/G9x virtual address space */
uint64_t vm_gart_base;
uint64_t vm_gart_size;
@@ -554,6 +583,7 @@ struct drm_nouveau_private {
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
+ uint64_t vram_sys_base;
/* the mtrr covering the FB */
int fb_mtrr;
@@ -647,6 +677,10 @@ extern int nouveau_fbpercrtc;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
+extern int nouveau_ctxfw;
+extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -682,6 +716,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
+ uint32_t addr,
+ uint32_t size,
+ uint32_t pitch);
+extern void nv10_mem_expire_tiling(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
uint32_t size, uint32_t flags,
uint64_t phys);
@@ -710,7 +751,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_channel_free(struct nouveau_channel *);
-extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -753,6 +793,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
uint32_t *o_ret);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -801,6 +843,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
#endif
/* nouveau_dma.c */
+extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int size);
@@ -876,16 +919,22 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
+extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
+extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern bool nv04_fifo_cache_flush(struct drm_device *);
+extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
extern int nv04_fifo_channel_id(struct drm_device *);
extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -938,6 +987,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
extern void nv10_graph_context_switch(struct drm_device *);
+extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv20_graph.c */
extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -949,6 +1000,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
+extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv40_graph.c */
extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -959,9 +1012,9 @@ extern int nv40_graph_create_context(struct nouveau_channel *);
extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
-extern int nv40_grctx_init(struct drm_device *);
-extern void nv40_grctx_fini(struct drm_device *);
-extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv50_graph.c */
extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -975,6 +1028,12 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
+/* nouveau_grctx.c */
+extern int nouveau_grctx_prog_load(struct drm_device *);
+extern void nouveau_grctx_vals_load(struct drm_device *,
+ struct nouveau_gpuobj *);
+extern void nouveau_grctx_fini(struct drm_device *);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
@@ -1023,8 +1082,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
/* nv04_dac.c */
extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector);
+extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
extern int nv04_dac_output_offset(struct drm_encoder *encoder);
extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
@@ -1042,9 +1100,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
/* nv17_tv.c */
extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
- struct drm_connector *connector,
- uint32_t pin_mask);
/* nv04_display.c */
extern int nv04_display_create(struct drm_device *);
@@ -1207,14 +1262,24 @@ static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
pci_name(d->pdev), ##arg)
#ifndef NV_DEBUG_NOTRACE
#define NV_DEBUG(d, fmt, arg...) do { \
- if (drm_debug) { \
+ if (drm_debug & DRM_UT_DRIVER) { \
+ NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
+ __LINE__, ##arg); \
+ } \
+} while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_KMS) { \
NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
__LINE__, ##arg); \
} \
} while (0)
#else
#define NV_DEBUG(d, fmt, arg...) do { \
- if (drm_debug) \
+ if (drm_debug & DRM_UT_DRIVER) \
+ NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
+} while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_KMS) \
NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
} while (0)
#endif
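NV_DEBUG() and NV_DEBUG_KMS() now key off separate drm_debug bits; the values below are taken from drmP.h of this era (verify against your tree) and show how to enable each class at module load:

/* drm_debug bit assignments (from drmP.h, for reference):
 *   DRM_UT_CORE   0x01
 *   DRM_UT_DRIVER 0x02  -> gates NV_DEBUG()
 *   DRM_UT_KMS    0x04  -> gates NV_DEBUG_KMS()
 * e.g. "modprobe drm debug=0x06" enables both driver and KMS debugging.
 */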
@@ -1273,14 +1338,14 @@ nv_two_reg_pll(struct drm_device *dev)
return false;
}
-#define NV50_NVSW 0x0000506e
-#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
-#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
-#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
-#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
-#define NV50_NVSW_DMA_VBLSEM 0x0000018c
-#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
-#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
-#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
+#define NV_SW 0x0000506e
+#define NV_SW_DMA_SEMAPHORE 0x00000060
+#define NV_SW_SEMAPHORE_OFFSET 0x00000064
+#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
+#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
+#define NV_SW_DMA_VBLSEM 0x0000018c
+#define NV_SW_VBLSEM_OFFSET 0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
+#define NV_SW_VBLSEM_RELEASE 0x00000408
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 36e8c5e4503..ea879a2efef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,14 +58,13 @@ nouveau_fbcon_sync(struct fb_info *info)
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
- if (!chan->accel_done ||
+ if (!chan || !chan->accel_done ||
info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;
if (RING_SPACE(chan, 4)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
}
if (ret) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
@@ -109,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static struct fb_ops nv04_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv04_fbcon_fillrect,
+ .fb_copyarea = nv04_fbcon_copyarea,
+ .fb_imageblit = nv04_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv50_fbcon_fillrect,
+ .fb_copyarea = nv50_fbcon_copyarea,
+ .fb_imageblit = nv50_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
@@ -212,11 +238,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mode_cmd.bpp = surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
- mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+ mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
mode_cmd.depth = surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nvbo);
@@ -269,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
dev_priv->fbdev_info = info;
strcpy(info->fix.id, "nouveaufb");
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ if (nouveau_nofbaccel)
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+ else
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
@@ -318,14 +348,18 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
par->nouveau_fb = nouveau_fb;
par->dev = dev;
- switch (dev_priv->card_type) {
- case NV_50:
- nv50_fbcon_accel_init(info);
- break;
- default:
- nv04_fbcon_accel_init(info);
- break;
- };
+ if (dev_priv->channel && !nouveau_nofbaccel) {
+ switch (dev_priv->card_type) {
+ case NV_50:
+ nv50_fbcon_accel_init(info);
+ info->fbops = &nv50_fbcon_ops;
+ break;
+ default:
+ nv04_fbcon_accel_init(info);
+ info->fbops = &nv04_fbcon_ops;
+ break;
+ }
+ }
nouveau_fbcon_zfill(dev);
@@ -347,7 +381,7 @@ out:
int
nouveau_fbcon_probe(struct drm_device *dev)
{
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
}
@@ -378,3 +412,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
return 0;
}
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 8531140fedb..f9c34e1a8c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,8 +40,15 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
void nouveau_fbcon_zfill(struct drm_device *dev);
+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
+void nouveau_fbcon_gpu_lockup(struct fb_info *info);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0cff7eb3690..faddf53ff9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock_irqrestore(&chan->fence.lock, flags);
- BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
FIRE_RING(chan);
@@ -205,7 +205,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
schedule_timeout(1);
if (intr && signal_pending(current)) {
- ret = -ERESTART;
+ ret = -ERESTARTSYS;
break;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 11f831f0ddc..70cc30803e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
}
struct validate_op {
- struct nouveau_fence *fence;
struct list_head vram_list;
struct list_head gart_list;
struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
}
static void
-validate_fini(struct validate_op *op, bool success)
+validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
- struct nouveau_fence *fence = op->fence;
-
- if (unlikely(!success))
- op->fence = NULL;
-
- validate_fini_list(&op->vram_list, op->fence);
- validate_fini_list(&op->gart_list, op->fence);
- validate_fini_list(&op->both_list, op->fence);
- nouveau_fence_unref((void *)&fence);
+ validate_fini_list(&op->vram_list, fence);
+ validate_fini_list(&op->gart_list, fence);
+ validate_fini_list(&op->both_list, fence);
}
static int
@@ -328,6 +321,7 @@ retry:
else {
NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
b->valid_domains);
+ list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
@@ -342,8 +336,6 @@ retry:
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- if (ret == -ERESTART)
- ret = -EAGAIN;
if (ret)
return ret;
goto retry;
@@ -422,10 +414,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
INIT_LIST_HEAD(&op->gart_list);
INIT_LIST_HEAD(&op->both_list);
- ret = nouveau_fence_new(chan, &op->fence, false);
- if (ret)
- return ret;
-
if (nr_buffers == 0)
return 0;
@@ -479,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
struct drm_nouveau_gem_pushbuf_bo *bo,
- int nr_relocs, uint64_t ptr_relocs,
- int nr_dwords, int first_dword,
+ unsigned nr_relocs, uint64_t ptr_relocs,
+ unsigned nr_dwords, unsigned first_dword,
uint32_t *pushbuf, bool is_iomem)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
struct drm_device *dev = chan->dev;
- int ret = 0, i;
+ int ret = 0;
+ unsigned i;
reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
@@ -543,6 +532,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
struct nouveau_channel *chan;
struct validate_op op;
+ struct nouveau_fence *fence = NULL;
uint32_t *pushbuf = NULL;
int ret = 0, do_reloc = 0, i;
@@ -599,7 +589,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
OUT_RINGp(chan, pushbuf, req->nr_dwords);
- ret = nouveau_fence_emit(op.fence);
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
@@ -607,7 +597,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
if (nouveau_gem_pushbuf_sync(chan)) {
- ret = nouveau_fence_wait(op.fence, NULL, false, false);
+ ret = nouveau_fence_wait(fence, NULL, false, false);
if (ret) {
for (i = 0; i < req->nr_dwords; i++)
NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -616,7 +606,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, ret == 0);
+ validate_fini(&op, fence);
+ nouveau_fence_unref((void **)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(pushbuf);
kfree(bo);
@@ -636,6 +627,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
struct nouveau_bo *pbbo;
struct validate_op op;
+ struct nouveau_fence *fence = NULL;
int i, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -677,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
}
pbbo = nouveau_gem_object(gem);
+ if ((req->offset & 3) || req->nr_dwords < 2 ||
+ (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
+ (unsigned long)req->nr_dwords >
+ ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
+ NV_ERROR(dev, "pb call misaligned or out of bounds: "
+ "%d + %d * 4 > %ld\n",
+ req->offset, req->nr_dwords, pbbo->bo.mem.size);
+ ret = -EINVAL;
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
chan->fence.sequence);
if (ret) {
@@ -774,7 +778,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
OUT_RING(chan, 0);
}
- ret = nouveau_fence_emit(op.fence);
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
@@ -782,7 +786,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, ret == 0);
+ validate_fini(&op, fence);
+ nouveau_fence_unref((void **)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
@@ -915,19 +920,16 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
goto out;
ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
- if (ret == -ERESTART)
- ret = -EAGAIN;
if (ret)
goto out;
}
if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+ spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+ spin_unlock(&nvbo->bo.lock);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
- if (ret == -ERESTART)
- ret = -EAGAIN;
- else
if (ret == 0)
nvbo->cpu_filp = file_priv;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
new file mode 100644
index 00000000000..c7ebec69674
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+struct nouveau_ctxprog {
+ uint32_t signature;
+ uint8_t version;
+ uint16_t length;
+ uint32_t data[];
+} __attribute__ ((packed));
+
+struct nouveau_ctxvals {
+ uint32_t signature;
+ uint8_t version;
+ uint32_t length;
+ struct {
+ uint32_t offset;
+ uint32_t value;
+ } data[];
+} __attribute__ ((packed));
+
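The header sizes these packed structs imply explain the length checks in nouveau_grctx_prog_load() below; a hedged restatement using C11 _Static_assert (illustrative only, not part of the patch):

/* ctxprog header: 4 (signature) + 1 (version) + 2 (length) = 7 bytes,
 * followed by 'length' 4-byte words -> (fw->size - 7) / 4.
 * ctxvals header: 4 + 1 + 4 = 9 bytes, followed by 'length' 8-byte
 * (offset, value) pairs -> (fw->size - 9) / 8.
 * The signatures are "NVCP" (0x5043564e) and "NVCV" (0x5643564e) read
 * little-endian. */
_Static_assert(sizeof(struct nouveau_ctxprog) == 7, "packed ctxprog header");
_Static_assert(sizeof(struct nouveau_ctxvals) == 9, "packed ctxvals header");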
+int
+nouveau_grctx_prog_load(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ const int chipset = dev_priv->chipset;
+ const struct firmware *fw;
+ const struct nouveau_ctxprog *cp;
+ const struct nouveau_ctxvals *cv;
+ char name[32];
+ int ret, i;
+
+ if (pgraph->accel_blocked)
+ return -ENODEV;
+
+ if (!pgraph->ctxprog) {
+ sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
+ return ret;
+ }
+
+ pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxprog) {
+ NV_ERROR(dev, "OOM copying ctxprog\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxprog, fw->data, fw->size);
+
+ cp = pgraph->ctxprog;
+ if (le32_to_cpu(cp->signature) != 0x5043564e ||
+ cp->version != 0 ||
+ le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
+ NV_ERROR(dev, "ctxprog invalid\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ if (!pgraph->ctxvals) {
+ sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
+ nouveau_grctx_fini(dev);
+ return ret;
+ }
+
+ pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxvals) {
+ NV_ERROR(dev, "OOM copying ctxvals\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxvals, fw->data, fw->size);
+
+ cv = (void *)pgraph->ctxvals;
+ if (le32_to_cpu(cv->signature) != 0x5643564e ||
+ cv->version != 0 ||
+ le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
+ NV_ERROR(dev, "ctxvals invalid\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ cp = pgraph->ctxprog;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < le16_to_cpu(cp->length); i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
+ le32_to_cpu(cp->data[i]));
+
+ return 0;
+}
+
+void
+nouveau_grctx_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ if (pgraph->ctxprog) {
+ kfree(pgraph->ctxprog);
+ pgraph->ctxprog = NULL;
+ }
+
+ if (pgraph->ctxvals) {
+ kfree(pgraph->ctxvals);
+ pgraph->ctxvals = NULL;
+ }
+}
+
+void
+nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_ctxvals *cv = pgraph->ctxvals;
+ int i;
+
+ if (!cv)
+ return;
+
+ for (i = 0; i < le32_to_cpu(cv->length); i++)
+ nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
+ le32_to_cpu(cv->data[i].value));
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
new file mode 100644
index 00000000000..5d39c4ce800
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -0,0 +1,133 @@
+#ifndef __NOUVEAU_GRCTX_H__
+#define __NOUVEAU_GRCTX_H__
+
+struct nouveau_grctx {
+ struct drm_device *dev;
+
+ enum {
+ NOUVEAU_GRCTX_PROG,
+ NOUVEAU_GRCTX_VALS
+ } mode;
+ void *data;
+
+ uint32_t ctxprog_max;
+ uint32_t ctxprog_len;
+ uint32_t ctxprog_reg;
+ int ctxprog_label[32];
+ uint32_t ctxvals_pos;
+ uint32_t ctxvals_base;
+};
+
+#ifdef CP_CTX
+static inline void
+cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+{
+ uint32_t *ctxprog = ctx->data;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
+ ctxprog[ctx->ctxprog_len++] = inst;
+}
+
+static inline void
+cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+{
+ cp_out(ctx, CP_LOAD_SR | val);
+}
+
+static inline void
+cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+{
+ ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
+
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+ ctx->ctxvals_pos = ctx->ctxvals_base + length;
+
+ if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
+ cp_lsr(ctx, length);
+ length = 0;
+ }
+
+ cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
+}
+
+static inline void
+cp_name(struct nouveau_grctx *ctx, int name)
+{
+ uint32_t *ctxprog = ctx->data;
+ int i;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ ctx->ctxprog_label[name] = ctx->ctxprog_len;
+ for (i = 0; i < ctx->ctxprog_len; i++) {
+ if ((ctxprog[i] & 0xfff00000) != 0xff400000)
+ continue;
+ if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
+ continue;
+ ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
+ (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
+ }
+}
+
+static inline void
+_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
+{
+ int ip = 0;
+
+ if (mod != 2) {
+ ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
+ if (ip == 0)
+ ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
+ }
+
+ cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
+ (state ? 0 : CP_BRA_IF_CLEAR));
+}
+#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#ifdef CP_BRA_MOD
+#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
+#endif
+
+static inline void
+_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
+}
+#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+_cp_set(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
+}
+#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+cp_pos(struct nouveau_grctx *ctx, int offset)
+{
+ ctx->ctxvals_pos = offset;
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+
+ cp_lsr(ctx, ctx->ctxvals_pos);
+ cp_out(ctx, CP_SET_CONTEXT_POINTER);
+}
+
+static inline void
+gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+{
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ reg = (reg - 0x00400000) / 4;
+ reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
+
+ nv_wo32(ctx->dev, ctx->data, reg, val);
+}
+#endif
+
+#endif
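cp_name() and _cp_bra() implement a one-pass assembler with backpatched forward branches. The standalone model below shows the mechanism; the opcode and field values are hypothetical stand-ins, since the real CP_* constants are expected from the including file (e.g. nv40_grctx.c):

#include <stdio.h>
#include <stdint.h>

#define OP_BRA		0x00400000	/* hypothetical branch opcode */
#define IP_SHIFT	8

static uint32_t prog[256];
static int len;
static int label_pos[32];

/* Branch to 'name'; if the label is still unknown, emit a 0xff-marked
 * placeholder carrying the label id, as _cp_bra() does. */
static void emit_bra(int name)
{
	uint32_t ip = (uint32_t)label_pos[name] << IP_SHIFT;

	if (ip == 0)
		ip = 0xff000000 | ((uint32_t)name << IP_SHIFT);
	prog[len++] = OP_BRA | ip;
}

/* Define 'name' at the current position and backpatch every unresolved
 * branch to it, mirroring cp_name(). */
static void define_label(int name)
{
	int i;

	label_pos[name] = len;
	for (i = 0; i < len; i++) {
		if ((prog[i] & 0xfff00000) != 0xff400000)
			continue;	/* resolved, or not a branch */
		if (((prog[i] >> IP_SHIFT) & 0xff) != (uint32_t)name)
			continue;	/* unresolved, but another label */
		prog[i] = (prog[i] & 0x00ff00ff) |
			  ((uint32_t)len << IP_SHIFT);
	}
}

int main(void)
{
	emit_bra(1);		/* forward branch: placeholder 0xff400100 */
	prog[len++] = 0;	/* some unrelated instruction */
	define_label(1);	/* patched to target word 2: 0x00400200 */
	printf("0x%08x\n", prog[0]);
	return 0;
}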
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index a2c30f4611b..475ba810bba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -61,12 +61,10 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
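
With the lock_kernel()/unlock_kernel() bracketing gone (locking is handled inside drm_ioctl() itself, which now takes the struct file directly), the compat path is a plain table dispatch. Note the bounds check in the context above still reads DRM_ARRAY_SIZE(mga_compat_ioctls) — an apparent copy-paste from the MGA driver. The intended shape, sketched with a made-up function name:

long
example_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;

	if (nr >= DRM_COMMAND_BASE &&
	    nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
		fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];

	/* Everything else falls through to the native ioctl path. */
	return fn ? (*fn)(filp, cmd, arg) : drm_ioctl(filp, cmd, arg);
}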
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 370c72c968d..447f9f69d6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
get + 4);
}
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
if (status) {
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
if (nouveau_pgraph_intr_swmthd(dev, &trap))
unhandled = 1;
+ } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ uint32_t v = nv_rd32(dev, 0x402000);
+ nv_wr32(dev, 0x402000, v);
+
+ /* dump the error anyway for now: it's useful for
+ Gallium development */
+ unhandled = 1;
} else {
unhandled = 1;
}
@@ -559,85 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
- uint32_t status, nsource;
+ uint32_t status;
- status = nv_rd32(dev, NV03_PGRAPH_INTR);
- nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- if (status & 0x00000001) {
- nouveau_pgraph_intr_notify(dev, nsource);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
+ if (status & 0x00000001) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+ status &= ~0x00000001;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+ }
- if (status & 0x00000010) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+ if (status & 0x00000010) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
+ status &= ~0x00000010;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+ }
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
+ if (status & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) &
+ ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, 0x400500, 0x00010001);
- nv50_graph_context_switch(dev);
+ nv50_graph_context_switch(dev);
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ }
- if (status & 0x00100000) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_DATA_ERROR);
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
+ status &= ~0x00100000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+ }
- if (status & 0x00200000) {
- int r;
-
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
- NV_ERROR(dev, "magic set 1:\n");
- for (r = 0x408900; r <= 0x408910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
- for (r = 0x408e08; r <= 0x408e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
- NV_ERROR(dev, "magic set 2:\n");
- for (r = 0x409900; r <= 0x409910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
- for (r = 0x409e08; r <= 0x409e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
+ if (status & 0x00200000) {
+ int r;
+
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ NV_ERROR(dev, "magic set 1:\n");
+ for (r = 0x408900; r <= 0x408910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408900,
+ nv_rd32(dev, 0x408904) | 0xc0000000);
+ for (r = 0x408e08; r <= 0x408e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408e08,
+ nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+ NV_ERROR(dev, "magic set 2:\n");
+ for (r = 0x409900; r <= 0x409910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409900,
+ nv_rd32(dev, 0x409904) | 0xc0000000);
+ for (r = 0x409e08; r <= 0x409e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409e08,
+ nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+ status &= ~0x00200000;
+ nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+ }
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
+ status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
- {
- const int isb = (1 << 16) | (1 << 0);
+ {
+ const int isb = (1 << 16) | (1 << 0);
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+ if ((nv_rd32(dev, 0x400500) & isb) != isb)
+ nv_wr32(dev, 0x400500,
+ nv_rd32(dev, 0x400500) | isb);
+ }
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
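
The nv50 handler rewrite above replaces a single status read with the usual drain loop: re-read NV03_PGRAPH_INTR until it returns zero, acknowledging each recognised bit by writing it back (the register is write-one-to-clear), so interrupts raised while earlier ones were being serviced are not lost. The idiom in isolation, using the notify bit as the example:

static void
example_pgraph_drain(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		if (status & 0x00000001) {	/* NOTIFY */
			/* ... service the notify interrupt ... */
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
			status &= ~0x00000001;
		}

		/* Ack unrecognised bits too, so a stuck source cannot
		 * wedge the loop. */
		if (status)
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
	}
}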
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 02755712ed3..2dc09dbd817 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
}
/*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr;
+ tile->size = size;
+ tile->used = !!pitch;
+ nouveau_fence_unref((void **)&tile->fence);
+
+ if (!pfifo->cache_flush(dev))
+ return;
+
+ pfifo->reassign(dev, false);
+ pfifo->cache_flush(dev);
+ pfifo->cache_pull(dev, false);
+
+ nouveau_wait_for_idle(dev);
+
+ pgraph->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_region_tiling(dev, i, addr, size, pitch);
+
+ pfifo->cache_pull(dev, true);
+ pfifo->reassign(dev, true);
+}
+
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
+ int i;
+
+ spin_lock(&dev_priv->tile.lock);
+
+ for (i = 0; i < pfb->num_tiles; i++) {
+ if (tile[i].used)
+ /* Tile region in use. */
+ continue;
+
+ if (tile[i].fence &&
+ !nouveau_fence_signalled(tile[i].fence, NULL))
+ /* Pending tile region. */
+ continue;
+
+ if (max(tile[i].addr, addr) <
+ min(tile[i].addr + tile[i].size, addr + size))
+ /* Kill an intersecting tile region. */
+ nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+
+ if (pitch && !found) {
+ /* Free tile region. */
+ nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
+ found = &tile[i];
+ }
+ }
+
+ spin_unlock(&dev_priv->tile.lock);
+
+ return found;
+}
+
+void
+nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
+{
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
+ }
+
+ tile->used = false;
+}
+
+/*
* NV50 VM helpers
*/
int
@@ -199,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
uint32_t flags, uint64_t phys)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj **pgt;
- unsigned psz, pfl, pages;
-
- if (virt >= dev_priv->vm_gart_base &&
- (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
- psz = 12;
- pgt = &dev_priv->gart_info.sg_ctxdma;
- pfl = 0x21;
- virt -= dev_priv->vm_gart_base;
- } else
- if (virt >= dev_priv->vm_vram_base &&
- (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
- psz = 16;
- pgt = dev_priv->vm_vram_pt;
- pfl = 0x01;
- virt -= dev_priv->vm_vram_base;
- } else {
- NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
- virt, virt + size - 1);
- return -EINVAL;
+ struct nouveau_gpuobj *pgt;
+ unsigned block;
+ int i;
+
+ virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+ size = (size >> 16) << 1;
+
+ phys |= ((uint64_t)flags << 32);
+ phys |= 1;
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ phys |= 0x30;
}
- pages = size >> psz;
-
dev_priv->engine.instmem.prepare_access(dev, true);
- if (flags & 0x80000000) {
- while (pages--) {
- struct nouveau_gpuobj *pt = pgt[virt >> 29];
- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+ while (size) {
+ unsigned offset_h = upper_32_bits(phys);
+ unsigned offset_l = lower_32_bits(phys);
+ unsigned pte, end;
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 1);
+ if (size >= block && !(virt & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
- nv_wo32(dev, pt, pte++, 0x00000000);
- nv_wo32(dev, pt, pte++, 0x00000000);
+ phys += block << 15;
+ size -= block;
- virt += (1 << psz);
- }
- } else {
- while (pages--) {
- struct nouveau_gpuobj *pt = pgt[virt >> 29];
- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
- unsigned offset_h = upper_32_bits(phys) & 0xff;
- unsigned offset_l = lower_32_bits(phys);
+ while (block) {
+ pgt = dev_priv->vm_vram_pt[virt >> 14];
+ pte = virt & 0x3ffe;
- nv_wo32(dev, pt, pte++, offset_l | pfl);
- nv_wo32(dev, pt, pte++, offset_h | flags);
+ end = pte + block;
+ if (end > 16384)
+ end = 16384;
+ block -= (end - pte);
+ virt += (end - pte);
- phys += (1 << psz);
- virt += (1 << psz);
+ while (pte < end) {
+ nv_wo32(dev, pgt, pte++, offset_l);
+ nv_wo32(dev, pgt, pte++, offset_h);
+ }
}
}
dev_priv->engine.instmem.finish_access(dev);
@@ -270,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
- nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pgt;
+ unsigned pages, pte, end;
+
+ virt -= dev_priv->vm_vram_base;
+ pages = (size >> 16) << 1;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pages) {
+ pgt = dev_priv->vm_vram_pt[virt >> 29];
+ pte = (virt & 0x1ffe0000ULL) >> 15;
+
+ end = pte + pages;
+ if (end > 16384)
+ end = 16384;
+ pages -= (end - pte);
+ virt += (end - pte) << 15;
+
+ while (pte < end)
+ nv_wo32(dev, pgt, pte++, 0);
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ }
}
/*
@@ -297,9 +414,8 @@ void nouveau_mem_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ nouveau_bo_unpin(dev_priv->vga_ram);
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -407,6 +523,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
return 0;
}
+#if __OS_HAS_AGP
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
@@ -432,10 +549,12 @@ static void nouveau_mem_reset_agp(struct drm_device *dev)
nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
+#endif
int
nouveau_mem_init_agp(struct drm_device *dev)
{
+#if __OS_HAS_AGP
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -471,6 +590,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_AGP;
dev_priv->gart_info.aper_base = info.aperture_base;
dev_priv->gart_info.aper_size = info.aperture_size;
+#endif
return 0;
}
@@ -509,6 +629,7 @@ nouveau_mem_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
spin_lock_init(&dev_priv->ttm.bo_list_lock);
+ spin_lock_init(&dev_priv->tile.lock);
dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
@@ -531,6 +652,15 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
+
/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
if (drm_device_is_agp(dev) && dev->agp) {
@@ -562,6 +692,7 @@ nouveau_mem_init(struct drm_device *dev)
dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
drm_get_resource_len(dev, 1),
DRM_MTRR_WC);
+
return 0;
}
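
Two things are worth pulling out of the nouveau_mem.c changes above. The tiling helpers bracket every region update with pfifo->reassign()/cache_flush()/cache_pull() so the registers change only while the FIFO is quiesced, and a region is recycled only once its fence has signalled. And nv50_mem_vm_bind_linear() now works in units of 64 KiB large pages, each occupying two 32-bit PTE words — hence the (x >> 16) << 1 conversions — with the inner search picking the largest aligned power-of-two run and folding its log2 into bits 9:7 of the low PTE word as a contiguity hint. A stand-alone sketch of that selection (virt and size in PTE words):

static unsigned
example_pick_block(unsigned virt, unsigned size, int *contig)
{
	unsigned block;
	int i;

	for (i = 7; i >= 0; i--) {
		block = 1 << (i + 1);	/* 256 down to 2 words */
		if (size >= block && !(virt & (block - 1)))
			break;
	}

	*contig = i;	/* folded into bits 9:7 of the low PTE word */
	return block;
}

For example, a 1 MiB mapping starting on a 1 MiB boundary reduces to a single 32-word block with the hint field set to 4.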
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b634..d99dc087f9b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
+ uint32_t flags;
int ret;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
- TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+ if (nouveau_vram_notify)
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
0, 0x0000, false, true, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(ntfy, flags);
if (ret)
goto out_err;
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_DMA_TARGET_PCI;
} else {
target = NV_DMA_TARGET_AGP;
+ if (dev_priv->card_type >= NV_50)
+ offset += dev_priv->vm_gart_base;
}
} else {
NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 93379bb81be..e7c100ba63a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -881,15 +881,16 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
return 0;
}
-static int
+int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
struct nouveau_gpuobj **gpuobj_ret)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_nouveau_private *dev_priv;
struct nouveau_gpuobj *gpuobj;
if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
return -EINVAL;
+ dev_priv = chan->dev->dev_private;
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index fa1b0e7165b..aa9b310e41b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
* the card will hang early on in the X init process.
*/
# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_GRAPH_UNITS 0x00001540
#define NV40_PMC_BACKLIGHT 0x000015f0
# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
#define NV40_PMC_1700 0x00001700
@@ -349,19 +350,19 @@
#define NV04_PGRAPH_BLEND 0x00400824
#define NV04_PGRAPH_STORED_FMT 0x00400830
#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
-#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
-#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
-#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
-#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
#define NV04_PGRAPH_U_RAM 0x00400D00
-#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
-#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
-#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
-#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
+#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e8..ed1590577b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
-
- NV_DEBUG(nvbe->dev, "\n");
+ struct drm_device *dev;
if (nvbe && nvbe->pages) {
+ dev = nvbe->dev;
+ NV_DEBUG(dev, "\n");
+
if (nvbe->bound)
be->func->unbind(be);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 2ed41d339f6..a4851af5b05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv04_fifo_channel_id;
engine->fifo.create_context = nv04_fifo_create_context;
engine->fifo.destroy_context = nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv10_graph_grclass;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
+ engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv20_graph_grclass;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv30_graph_grclass;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
+ engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
engine->graph.grclass = nv40_graph_grclass;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
+ engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
engine->fifo.destroy_context = nv40_fifo_destroy_context;
@@ -292,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
+ struct drm_device *dev = priv;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ nv_wr32(dev, 0x88054, state);
+ else
+ nv_wr32(dev, 0x1854, state);
+
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -299,12 +325,57 @@ nouveau_vga_set_decode(void *priv, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static int
+nouveau_card_init_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel,
+ (struct drm_file *)-2,
+ NvDmaFB, NvDmaTT);
+ if (ret)
+ return ret;
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, nouveau_mem_fb_amount(dev),
+ NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ &gpuobj);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
+ gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
+ gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ return 0;
+out_err:
+ nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ return ret;
+}
+
int
nouveau_card_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine;
- struct nouveau_gpuobj *gpuobj;
int ret;
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
@@ -317,7 +388,7 @@ nouveau_card_init(struct drm_device *dev)
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
if (ret)
- return ret;
+ goto out;
engine = &dev_priv->engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
@@ -325,12 +396,12 @@ nouveau_card_init(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = nouveau_bios_init(dev);
if (ret)
- return ret;
+ goto out;
}
ret = nouveau_gpuobj_early_init(dev);
if (ret)
- return ret;
+ goto out_bios;
/* Initialise instance memory, must happen before mem_init so we
* know exactly how much VRAM we're able to use for "normal"
@@ -338,100 +409,72 @@ nouveau_card_init(struct drm_device *dev)
*/
ret = engine->instmem.init(dev);
if (ret)
- return ret;
+ goto out_gpuobj_early;
/* Setup the memory manager */
ret = nouveau_mem_init(dev);
if (ret)
- return ret;
+ goto out_instmem;
ret = nouveau_gpuobj_init(dev);
if (ret)
- return ret;
+ goto out_mem;
/* PMC */
ret = engine->mc.init(dev);
if (ret)
- return ret;
+ goto out_gpuobj;
/* PTIMER */
ret = engine->timer.init(dev);
if (ret)
- return ret;
+ goto out_mc;
/* PFB */
ret = engine->fb.init(dev);
if (ret)
- return ret;
+ goto out_timer;
- /* PGRAPH */
- ret = engine->graph.init(dev);
- if (ret)
- return ret;
+ if (nouveau_noaccel)
+ engine->graph.accel_blocked = true;
+ else {
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret)
+ goto out_fb;
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- return ret;
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ goto out_graph;
+ }
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
*/
ret = drm_irq_install(dev);
if (ret)
- return ret;
+ goto out_fifo;
ret = drm_vblank_init(dev, 0);
if (ret)
- return ret;
+ goto out_irq;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
- ret = nouveau_channel_alloc(dev, &dev_priv->channel,
- (struct drm_file *)-2,
- NvDmaFB, NvDmaTT);
- if (ret)
- return ret;
-
- gpuobj = NULL;
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, nouveau_mem_fb_amount(dev),
- NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
- &gpuobj);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
- gpuobj, NULL);
- if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return ret;
- }
-
- gpuobj = NULL;
- ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &gpuobj, NULL);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
- gpuobj, NULL);
- if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return ret;
+ if (!engine->graph.accel_blocked) {
+ ret = nouveau_card_init_channel(dev);
+ if (ret)
+ goto out_irq;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->card_type >= NV_50)
ret = nv50_display_create(dev);
- if (ret)
- return ret;
- } else {
+ else
ret = nv04_display_create(dev);
- if (ret)
- return ret;
- }
+ if (ret)
+ goto out_irq;
}
ret = nouveau_backlight_init(dev);
@@ -444,6 +487,34 @@ nouveau_card_init(struct drm_device *dev)
drm_helper_initial_config(dev);
return 0;
+
+out_irq:
+ drm_irq_uninstall(dev);
+out_fifo:
+ if (!nouveau_noaccel)
+ engine->fifo.takedown(dev);
+out_graph:
+ if (!nouveau_noaccel)
+ engine->graph.takedown(dev);
+out_fb:
+ engine->fb.takedown(dev);
+out_timer:
+ engine->timer.takedown(dev);
+out_mc:
+ engine->mc.takedown(dev);
+out_gpuobj:
+ nouveau_gpuobj_takedown(dev);
+out_mem:
+ nouveau_mem_close(dev);
+out_instmem:
+ engine->instmem.takedown(dev);
+out_gpuobj_early:
+ nouveau_gpuobj_late_takedown(dev);
+out_bios:
+ nouveau_bios_takedown(dev);
+out:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ return ret;
}
static void nouveau_card_takedown(struct drm_device *dev)
@@ -461,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev)
dev_priv->channel = NULL;
}
- engine->fifo.takedown(dev);
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
engine->fb.takedown(dev);
engine->timer.takedown(dev);
engine->mc.takedown(dev);
mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
nouveau_sgdma_takedown(dev);
@@ -585,7 +659,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->chipset = (reg0 & 0xff00000) >> 20;
/* NV04 or NV05 */
} else if ((reg0 & 0xff00fff0) == 0x20004000) {
- dev_priv->chipset = 0x04;
+ if (reg0 & 0x00f00000)
+ dev_priv->chipset = 0x05;
+ else
+ dev_priv->chipset = 0x04;
} else
dev_priv->chipset = 0xff;
@@ -665,8 +742,8 @@ static void nouveau_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- /* In the case of an error dev_priv may not be be allocated yet */
- if (dev_priv && dev_priv->card_type)
+ /* In the case of an error dev_priv may not be allocated yet */
+ if (dev_priv)
nouveau_card_takedown(dev);
}
@@ -756,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+ * family anyway... */
+ if (dev_priv->chipset >= 0x40) {
+ getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ break;
+ }
+ /* FALLTHRU */
default:
NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
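
The nouveau_card_init() rework above converts its early returns into the kernel's standard unwind ladder: each successful step gains a goto label that tears it down, and a failure jumps to the label just past the last thing that succeeded, so teardown always runs in reverse order of setup. In miniature (the step/undo names are illustrative stand-ins):

static int
example_init(struct drm_device *dev)
{
	int ret;

	ret = init_step_a(dev);
	if (ret)
		goto out;

	ret = init_step_b(dev);
	if (ret)
		goto out_a;

	return 0;

out_a:
	undo_step_a(dev);	/* reverse order of setup */
out:
	return ret;
}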
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 187eb84e4da..c385d50f041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,45 +28,17 @@
#include "nouveau_drv.h"
-static struct vm_operations_struct nouveau_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int
-nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- int ret;
-
- if (unlikely(bo == NULL))
- return VM_FAULT_NOPAGE;
-
- ret = ttm_vm_ops->fault(vma, vmf);
- return ret;
-}
-
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct drm_nouveau_private *dev_priv =
file_priv->minor->dev->dev_private;
- int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
- ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- nouveau_ttm_vm_ops = *ttm_vm_ops;
- nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
- }
-
- vma->vm_ops = &nouveau_ttm_vm_ops;
- return 0;
+ return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index b9136360605..d2f143ed97c 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -143,10 +143,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
if (pv->NM2)
- NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+ NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
else
- NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
+ NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
pv->N1, pv->M1, pv->log2P);
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
@@ -160,7 +160,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
- NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
+ NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
nv_crtc->index);
if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
@@ -603,7 +603,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(adjusted_mode);
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
@@ -703,7 +703,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- NV_DEBUG(crtc->dev, "\n");
+ NV_DEBUG_KMS(crtc->dev, "\n");
if (!nv_crtc)
return;
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index a5fa51714e8..1d73b15d70d 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- uint8_t saved_seq1, saved_pi, saved_rpc1;
+ uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
/* only implemented for head A for now */
NVSetOwner(dev, 0);
+ saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
@@ -203,25 +206,25 @@ out:
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
- NV_TRACE(dev, "Load detected on head A\n");
+ NV_INFO(dev, "Load detected on head A\n");
return connector_status_connected;
}
return connector_status_disconnected;
}
-enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
- uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
- int head, present = 0;
+ int head;
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
if (dcb->type == OUTPUT_TV) {
@@ -287,13 +290,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
msleep(5);
- temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
-
- if (dcb->type == OUTPUT_TV)
- present = (nv17_tv_detect(encoder, connector, temp)
- == connector_status_connected);
- else
- present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+ sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -310,15 +307,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
- if (present) {
- NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+ return sample;
+}
+
+static enum drm_connector_status
+nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ uint32_t sample = nv17_dac_sample_load(encoder);
+
+ if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+ NV_INFO(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
return connector_status_connected;
+ } else {
+ return connector_status_disconnected;
}
-
- return connector_status_disconnected;
}
-
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -350,14 +357,10 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int head = nouveau_crtc(encoder->crtc)->index;
- NV_TRACE(dev, "%s called for encoder %d\n", __func__,
- nv_encoder->dcb->index);
-
if (nv_gf4_disp_arch(dev)) {
struct drm_encoder *rebind;
uint32_t dac_offset = nv04_dac_output_offset(encoder);
@@ -466,7 +469,7 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index e5b33339d59..483f875bdb6 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -261,7 +261,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *output_mode = &nv_encoder->mode;
uint32_t mode_ratio, panel_ratio;
- NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(output_mode);
/* Initialize the FP registers in this CRTC. */
@@ -413,7 +413,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
- NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -550,7 +552,7 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index b47c757ff48..ef77215fa5b 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -99,10 +99,11 @@ nv04_display_create(struct drm_device *dev)
uint16_t connector[16] = { 0 };
int i, ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
if (nv_two_heads(dev))
nv04_display_store_initial_head_owner(dev);
+ nouveau_hw_save_vga_fonts(dev, 1);
drm_mode_config_init(dev);
drm_mode_create_scaling_mode_property(dev);
@@ -203,8 +204,6 @@ nv04_display_create(struct drm_device *dev)
/* Save previous state */
NVLockVgaCrtcs(dev, false);
- nouveau_hw_save_vga_fonts(dev, 1);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -223,7 +222,7 @@ nv04_display_destroy(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -246,9 +245,9 @@ nv04_display_destroy(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->restore(crtc);
- nouveau_hw_save_vga_fonts(dev, 0);
-
drm_mode_config_cleanup(dev);
+
+ nouveau_hw_save_vga_fonts(dev, 0);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 09a31071ee5..fd01caabd5c 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -55,21 +54,19 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
if (info->state != FBINFO_STATE_RUNNING)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -80,14 +77,18 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
- OUT_RING(chan, color);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING(chan, rect->color);
BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int iter_len = dsize > 128 ? 128 : dsize;
if (RING_SPACE(chan, iter_len + 1)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
return;
}
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = par->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ const int sub = NvSubCtxSurf2D;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info)
return ret;
if (RING_SPACE(chan, 49)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, 1, 0x0184, 2);
+ BEGIN_RING(chan, sub, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_RING(chan, 1, 0x0300, 4);
+ BEGIN_RING(chan, sub, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, 1, 0x0300, 1);
+ BEGIN_RING(chan, sub, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_RING(chan, 1, 0x0300, 8);
+ BEGIN_RING(chan, sub, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_RING(chan, 1, 0x0300, 2);
+ BEGIN_RING(chan, sub, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
@@ -308,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
FIRE_RING(chan);
- info->fbops->fb_fillrect = nv04_fbcon_fillrect;
- info->fbops->fb_copyarea = nv04_fbcon_copyarea;
- info->fbops->fb_imageblit = nv04_fbcon_imageblit;
return 0;
}
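
All of the fbcon entry points above now share one failure shape: if reserving ring space fails, nouveau_fbcon_gpu_lockup() flips the console to software rendering (setting FBINFO_HWACCEL_DISABLED), and the generic cfb_* helper completes the request. Reduced to a sketch:

void
example_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_nouveau_private *dev_priv = par->dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* A failed reservation means the GPU is wedged; disable
	 * acceleration for good instead of blocking the console. */
	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7))
		nouveau_fbcon_gpu_lockup(info);

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);	/* software fallback */
		return;
	}

	/* ... BEGIN_RING()/OUT_RING() the hardware commands ... */
	FIRE_RING(chan);
}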
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 0c3cd53c731..f31347b8c9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
return (reassign == 1);
}
+bool
+nv04_fifo_cache_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
+ return true;
+
+ } while (ptimer->read(dev) - start < 100000000);
+
+ NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
+
+ return false;
+}
+
+bool
+nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+ uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+
+ if (enable) {
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
+ } else {
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+ }
+
+ return !!(pull & 1);
+}
+
int
nv04_fifo_channel_id(struct drm_device *dev)
{
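
nv04_fifo_cache_flush() above is an instance of the driver's PTIMER polling idiom: sample the engine timer once, spin on a condition, and give up after a fixed budget in nanoseconds (100 ms here). Generalised, with the condition left as a stand-in:

static bool
example_timed_poll(struct drm_device *dev, uint64_t timeout_ns)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	uint64_t start = ptimer->read(dev);

	do {
		if (example_condition(dev))	/* illustrative */
			return true;
	} while (ptimer->read(dev) - start < timeout_ns);

	return false;	/* caller decides how loudly to complain */
}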
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 396ee92118f..e260986ea65 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,6 +28,10 @@
#include "nouveau_drv.h"
static uint32_t nv04_graph_ctx_regs[] = {
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
NV04_PGRAPH_CTX_SWITCH1,
NV04_PGRAPH_CTX_SWITCH2,
NV04_PGRAPH_CTX_SWITCH3,
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = {
NV04_PGRAPH_PATT_COLOR0,
NV04_PGRAPH_PATT_COLOR1,
NV04_PGRAPH_PATT_COLORRAM+0x00,
- NV04_PGRAPH_PATT_COLORRAM+0x01,
- NV04_PGRAPH_PATT_COLORRAM+0x02,
- NV04_PGRAPH_PATT_COLORRAM+0x03,
NV04_PGRAPH_PATT_COLORRAM+0x04,
- NV04_PGRAPH_PATT_COLORRAM+0x05,
- NV04_PGRAPH_PATT_COLORRAM+0x06,
- NV04_PGRAPH_PATT_COLORRAM+0x07,
NV04_PGRAPH_PATT_COLORRAM+0x08,
- NV04_PGRAPH_PATT_COLORRAM+0x09,
- NV04_PGRAPH_PATT_COLORRAM+0x0A,
- NV04_PGRAPH_PATT_COLORRAM+0x0B,
- NV04_PGRAPH_PATT_COLORRAM+0x0C,
- NV04_PGRAPH_PATT_COLORRAM+0x0D,
- NV04_PGRAPH_PATT_COLORRAM+0x0E,
- NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x0c,
NV04_PGRAPH_PATT_COLORRAM+0x10,
- NV04_PGRAPH_PATT_COLORRAM+0x11,
- NV04_PGRAPH_PATT_COLORRAM+0x12,
- NV04_PGRAPH_PATT_COLORRAM+0x13,
NV04_PGRAPH_PATT_COLORRAM+0x14,
- NV04_PGRAPH_PATT_COLORRAM+0x15,
- NV04_PGRAPH_PATT_COLORRAM+0x16,
- NV04_PGRAPH_PATT_COLORRAM+0x17,
NV04_PGRAPH_PATT_COLORRAM+0x18,
- NV04_PGRAPH_PATT_COLORRAM+0x19,
- NV04_PGRAPH_PATT_COLORRAM+0x1A,
- NV04_PGRAPH_PATT_COLORRAM+0x1B,
- NV04_PGRAPH_PATT_COLORRAM+0x1C,
- NV04_PGRAPH_PATT_COLORRAM+0x1D,
- NV04_PGRAPH_PATT_COLORRAM+0x1E,
- NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x1c,
NV04_PGRAPH_PATT_COLORRAM+0x20,
- NV04_PGRAPH_PATT_COLORRAM+0x21,
- NV04_PGRAPH_PATT_COLORRAM+0x22,
- NV04_PGRAPH_PATT_COLORRAM+0x23,
NV04_PGRAPH_PATT_COLORRAM+0x24,
- NV04_PGRAPH_PATT_COLORRAM+0x25,
- NV04_PGRAPH_PATT_COLORRAM+0x26,
- NV04_PGRAPH_PATT_COLORRAM+0x27,
NV04_PGRAPH_PATT_COLORRAM+0x28,
- NV04_PGRAPH_PATT_COLORRAM+0x29,
- NV04_PGRAPH_PATT_COLORRAM+0x2A,
- NV04_PGRAPH_PATT_COLORRAM+0x2B,
- NV04_PGRAPH_PATT_COLORRAM+0x2C,
- NV04_PGRAPH_PATT_COLORRAM+0x2D,
- NV04_PGRAPH_PATT_COLORRAM+0x2E,
- NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x2c,
NV04_PGRAPH_PATT_COLORRAM+0x30,
- NV04_PGRAPH_PATT_COLORRAM+0x31,
- NV04_PGRAPH_PATT_COLORRAM+0x32,
- NV04_PGRAPH_PATT_COLORRAM+0x33,
NV04_PGRAPH_PATT_COLORRAM+0x34,
- NV04_PGRAPH_PATT_COLORRAM+0x35,
- NV04_PGRAPH_PATT_COLORRAM+0x36,
- NV04_PGRAPH_PATT_COLORRAM+0x37,
NV04_PGRAPH_PATT_COLORRAM+0x38,
- NV04_PGRAPH_PATT_COLORRAM+0x39,
- NV04_PGRAPH_PATT_COLORRAM+0x3A,
- NV04_PGRAPH_PATT_COLORRAM+0x3B,
- NV04_PGRAPH_PATT_COLORRAM+0x3C,
- NV04_PGRAPH_PATT_COLORRAM+0x3D,
- NV04_PGRAPH_PATT_COLORRAM+0x3E,
- NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATT_COLORRAM+0x3c,
+ NV04_PGRAPH_PATT_COLORRAM+0x40,
+ NV04_PGRAPH_PATT_COLORRAM+0x44,
+ NV04_PGRAPH_PATT_COLORRAM+0x48,
+ NV04_PGRAPH_PATT_COLORRAM+0x4c,
+ NV04_PGRAPH_PATT_COLORRAM+0x50,
+ NV04_PGRAPH_PATT_COLORRAM+0x54,
+ NV04_PGRAPH_PATT_COLORRAM+0x58,
+ NV04_PGRAPH_PATT_COLORRAM+0x5c,
+ NV04_PGRAPH_PATT_COLORRAM+0x60,
+ NV04_PGRAPH_PATT_COLORRAM+0x64,
+ NV04_PGRAPH_PATT_COLORRAM+0x68,
+ NV04_PGRAPH_PATT_COLORRAM+0x6c,
+ NV04_PGRAPH_PATT_COLORRAM+0x70,
+ NV04_PGRAPH_PATT_COLORRAM+0x74,
+ NV04_PGRAPH_PATT_COLORRAM+0x78,
+ NV04_PGRAPH_PATT_COLORRAM+0x7c,
+ NV04_PGRAPH_PATT_COLORRAM+0x80,
+ NV04_PGRAPH_PATT_COLORRAM+0x84,
+ NV04_PGRAPH_PATT_COLORRAM+0x88,
+ NV04_PGRAPH_PATT_COLORRAM+0x8c,
+ NV04_PGRAPH_PATT_COLORRAM+0x90,
+ NV04_PGRAPH_PATT_COLORRAM+0x94,
+ NV04_PGRAPH_PATT_COLORRAM+0x98,
+ NV04_PGRAPH_PATT_COLORRAM+0x9c,
+ NV04_PGRAPH_PATT_COLORRAM+0xa0,
+ NV04_PGRAPH_PATT_COLORRAM+0xa4,
+ NV04_PGRAPH_PATT_COLORRAM+0xa8,
+ NV04_PGRAPH_PATT_COLORRAM+0xac,
+ NV04_PGRAPH_PATT_COLORRAM+0xb0,
+ NV04_PGRAPH_PATT_COLORRAM+0xb4,
+ NV04_PGRAPH_PATT_COLORRAM+0xb8,
+ NV04_PGRAPH_PATT_COLORRAM+0xbc,
+ NV04_PGRAPH_PATT_COLORRAM+0xc0,
+ NV04_PGRAPH_PATT_COLORRAM+0xc4,
+ NV04_PGRAPH_PATT_COLORRAM+0xc8,
+ NV04_PGRAPH_PATT_COLORRAM+0xcc,
+ NV04_PGRAPH_PATT_COLORRAM+0xd0,
+ NV04_PGRAPH_PATT_COLORRAM+0xd4,
+ NV04_PGRAPH_PATT_COLORRAM+0xd8,
+ NV04_PGRAPH_PATT_COLORRAM+0xdc,
+ NV04_PGRAPH_PATT_COLORRAM+0xe0,
+ NV04_PGRAPH_PATT_COLORRAM+0xe4,
+ NV04_PGRAPH_PATT_COLORRAM+0xe8,
+ NV04_PGRAPH_PATT_COLORRAM+0xec,
+ NV04_PGRAPH_PATT_COLORRAM+0xf0,
+ NV04_PGRAPH_PATT_COLORRAM+0xf4,
+ NV04_PGRAPH_PATT_COLORRAM+0xf8,
+ NV04_PGRAPH_PATT_COLORRAM+0xfc,
NV04_PGRAPH_PATTERN,
0x0040080c,
NV04_PGRAPH_PATTERN_SHAPE,
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = {
0x004004f8,
0x0040047c,
0x004004fc,
- 0x0040053c,
- 0x00400544,
- 0x00400540,
- 0x00400548,
- 0x00400560,
- 0x00400568,
- 0x00400564,
- 0x0040056c,
0x00400534,
0x00400538,
0x00400514,
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = {
0x00400500,
0x00400504,
NV04_PGRAPH_VALID1,
- NV04_PGRAPH_VALID2
-
-
+ NV04_PGRAPH_VALID2,
+ NV04_PGRAPH_DEBUG_3
};
struct graph_state {
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev)
pgraph->fifo_access(dev, true);
}
+static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+ if (nv04_graph_ctx_regs[i] == reg)
+ return &ctx->nv04[i];
+ }
+
+ return NULL;
+}
+
int nv04_graph_create_context(struct nouveau_channel *chan)
{
struct graph_state *pgraph_ctx;
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
if (pgraph_ctx == NULL)
return -ENOMEM;
- /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
- pgraph_ctx->nv04[0] = 0x0001ffff;
- /* is it really needed ??? */
-#if 0
- dev_priv->fifos[channel].pgraph_ctx[1] =
- nv_rd32(dev, NV_PGRAPH_DEBUG_4);
- dev_priv->fifos[channel].pgraph_ctx[2] =
- nv_rd32(dev, 0x004006b0);
-#endif
+ *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
return 0;
}
@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
+
tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+
return 0;
}
@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev)
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= dev_priv->engine.fifo.channels << 24;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
uint32_t tmp;
@@ -543,11 +547,11 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
- nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
return 0;
}
-static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
};
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, nv04_graph_mthds_m2mf },
+ { 0x0039, false, NULL },
{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
{ 0x0053, false, NULL }, /* surf3d */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x506e, true, nv04_graph_mthds_sw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a..a3b9563a6f6 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
* of vram. For now, only reserve a small piece until we know
* more about what each chipset requires.
*/
- switch (dev_priv->chipset & 0xf0) {
+ switch (dev_priv->chipset) {
case 0x40:
case 0x47:
case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index 79e2d104d70..cc5cda44e50 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,17 +3,37 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch) {
+ if (dev_priv->card_type >= NV_20)
+ addr |= 1;
+ else
+ addr |= 1 << 31;
+ }
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), addr);
+}
+
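The limit arithmetic used by set_region_tiling() above is worth a closer look. Here is a standalone sketch of just the math (plain C, outside the patch; register semantics as the patch implies, with TLIMIT holding the inclusive end address and one bit of the TILE word acting as the enable flag):

#include <stdint.h>

/* Inclusive end address of a tiling region.  The max(1u, ...) guard
 * keeps the "off" case (addr == size == 0) from wrapping around to
 * 0xffffffff; it yields 0 instead. */
static uint32_t tile_limit(uint32_t addr, uint32_t size)
{
	uint32_t end = addr + size;

	return (end > 1u ? end : 1u) - 1;
}

/* Enable flag folded into the address word, per the patch: bit 0 on
 * NV20 and later, bit 31 on older cards.  pitch == 0 leaves the
 * region disabled. */
static uint32_t tile_addr(uint32_t addr, uint32_t pitch, int nv20_plus)
{
	if (pitch)
		addr |= nv20_plus ? 1u : 1u << 31;

	return addr;
}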
int
nv10_fb_init(struct drm_device *dev)
{
- uint32_t fb_bar_size;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i;
- fb_bar_size = drm_get_resource_len(dev, 0) - 1;
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, NV10_PFB_TILE(i), 0);
- nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
- }
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 6bf6804bb0e..fcf2cdd1949 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -389,49 +389,50 @@ struct graph_state {
int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
struct pipe_state pipe_state;
+ uint32_t lma_window[4];
};
+#define PIPE_SAVE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+#define PIPE_RESTORE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+ } while (0)
+
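Note that the reworked macros take the state array explicitly and size the transfer with ARRAY_SIZE(), so the argument must be an actual array rather than a pointer. A hedged usage sketch (same names as this file):

	uint32_t scratch[4];

	/* Reads ARRAY_SIZE(scratch) == 4 words starting at pipe
	 * address 0x0040 into scratch[]. */
	PIPE_SAVE(dev, scratch, 0x0040);

	/* Passing a plain pointer instead would make ARRAY_SIZE()
	 * compute a bogus length (or fail the kernel's array check). */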
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
- struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
- int i;
-#define PIPE_SAVE(addr) \
- do { \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
- for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
- fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
- } while (0)
-
- PIPE_SAVE(0x4400);
- PIPE_SAVE(0x0200);
- PIPE_SAVE(0x6400);
- PIPE_SAVE(0x6800);
- PIPE_SAVE(0x6c00);
- PIPE_SAVE(0x7000);
- PIPE_SAVE(0x7400);
- PIPE_SAVE(0x7800);
- PIPE_SAVE(0x0040);
- PIPE_SAVE(0x0000);
-
-#undef PIPE_SAVE
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
}
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
- struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
- int i;
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
uint32_t xfmode0, xfmode1;
-#define PIPE_RESTORE(addr) \
- do { \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
- for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
- } while (0)
-
+ int i;
nouveau_wait_for_idle(dev);
/* XXX check haiku comments */
@@ -457,24 +458,22 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan)
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
- PIPE_RESTORE(0x0200);
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
nouveau_wait_for_idle(dev);
/* restore XFMODE */
nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
- PIPE_RESTORE(0x6400);
- PIPE_RESTORE(0x6800);
- PIPE_RESTORE(0x6c00);
- PIPE_RESTORE(0x7000);
- PIPE_RESTORE(0x7400);
- PIPE_RESTORE(0x7800);
- PIPE_RESTORE(0x4400);
- PIPE_RESTORE(0x0000);
- PIPE_RESTORE(0x0040);
+ PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
+ PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
nouveau_wait_for_idle(dev);
-
-#undef PIPE_RESTORE
}
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
@@ -808,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
chan->pgraph_ctx = NULL;
}
+void
+nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1 << 31;
+
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+}
+
int nv10_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -832,21 +845,16 @@ int nv10_graph_init(struct drm_device *dev)
(1<<31));
if (dev_priv->chipset >= 0x17) {
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nv_wr32(dev, 0x400a10, 0x3ff3fb6);
+ nv_wr32(dev, 0x400838, 0x2f8684);
+ nv_wr32(dev, 0x40083c, 0x115f3f);
nv_wr32(dev, 0x004006b0, 0x40000020);
} else
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, NV10_PGRAPH_TILE(i),
- nv_rd32(dev, NV10_PFB_TILE(i)));
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
- nv_rd32(dev, NV10_PFB_TSTATUS(i)));
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
@@ -867,6 +875,115 @@ void nv10_graph_takedown(struct drm_device *dev)
{
}
+static int
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *ctx = chan->pgraph_ctx;
+ struct pipe_state *pipe = &ctx->pipe_state;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+ uint32_t xfmode0, xfmode1;
+ int i;
+
+ ctx->lma_window[(mthd - 0x1638) / 4] = data;
+
+ if (mthd != 0x1644)
+ return 0;
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_SAVE(dev, pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+
+ PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
+
+ nouveau_wait_for_idle(dev);
+
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+
+ PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nouveau_wait_for_idle(dev);
+
+ pgraph->fifo_access(dev, true);
+
+ return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
+ nv_wr32(dev, 0x004006b0,
+ nv_rd32(dev, 0x004006b0) | 0x8 << 24);
+
+ pgraph->fifo_access(dev, true);
+
+ return 0;
+}
+
+static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
+ { 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, nv17_graph_mthd_lma_enable },
+ {}
+};
+
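The LMA window handler above buffers four method words and only performs the pipe save/restore dance on the last one. A standalone sketch of just that buffering (plain C, illustrative data values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lma_window[4] = { 0 };
	const int mthds[] = { 0x1638, 0x163c, 0x1640, 0x1644 };
	int i;

	for (i = 0; i < 4; i++) {
		int mthd = mthds[i];

		/* methods 0x1638..0x1644 map onto lma_window[0..3] */
		lma_window[(mthd - 0x1638) / 4] = 0x1000 + i;

		if (mthd != 0x1644)
			continue; /* buffered only, nothing committed */

		printf("commit %04x %04x %04x %04x\n", lma_window[0],
		       lma_window[1], lma_window[2], lma_window[3]);
	}

	return 0;
}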
struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
{ 0x0030, false, NULL }, /* null */
{ 0x0039, false, NULL }, /* m2mf */
@@ -887,6 +1004,6 @@ struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
{ 0x0095, false, NULL }, /* multitex_tri */
{ 0x0056, false, NULL }, /* celcius (nv10) */
{ 0x0096, false, NULL }, /* celcius (nv11) */
- { 0x0099, false, NULL }, /* celcius (nv17) */
+ { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 46cfd9c6047..21ac6e49b6e 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -33,13 +33,103 @@
#include "nouveau_hw.h"
#include "nv17_tv.h"
-enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
- struct drm_connector *connector,
- uint32_t pin_mask)
+static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+ fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+ uint32_t sample = 0;
+ int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+ testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
+ if (dev_priv->vbios->tvdactestval)
+ testval = dev_priv->vbios->tvdactestval;
+
+ dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ head = (dacclk & 0x100) >> 8;
+
+ /* Save the previous state. */
+ gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+ fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+ fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+ fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+ test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
+ ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
+ ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+
+ /* Prepare the DAC for load detection. */
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+ NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
+ NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+ (dacclk & ~0xff) | 0x22);
+ msleep(1);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+ (dacclk & ~0xff) | 0x21);
+
+ NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
+ NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
+
+ /* Sample pin 0x4 (usually S-video luma). */
+ NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
+ msleep(20);
+ sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+ & 0x4 << 28;
+
+ /* Sample the remaining pins. */
+ NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
+ msleep(20);
+ sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+ & 0xa << 28;
+
+ /* Restore the previous state. */
+ NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
+ NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
+ NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+
+ return sample;
+}
+
+static enum drm_connector_status
+nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct dcb_entry *dcb = tv_enc->base.dcb;
- tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+ if (dev_priv->chipset == 0x42 ||
+ dev_priv->chipset == 0x43)
+ tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
+ else
+ tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
switch (tv_enc->pin_mask) {
case 0x2:
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
break;
case 0xe:
- if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+ if (dcb->tvconf.has_component_output)
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
else
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
}
drm_connector_property_set_value(connector,
- encoder->dev->mode_config.tv_subconnector_property,
- tv_enc->subconnector);
+ conf->tv_subconnector_property,
+ tv_enc->subconnector);
- return tv_enc->subconnector ? connector_status_connected :
- connector_status_disconnected;
+ if (tv_enc->subconnector) {
+ NV_INFO(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
+ return connector_status_connected;
+ } else {
+ return connector_status_disconnected;
+ }
}
static const struct {
@@ -219,7 +314,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
return;
nouveau_encoder(encoder)->last_dpms = mode;
- NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
mode, nouveau_encoder(encoder)->dcb->index);
regs->ptv_200 &= ~1;
@@ -484,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
nouveau_encoder(encoder)->restore.output);
nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+ nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
static int nv17_tv_create_resources(struct drm_encoder *encoder,
@@ -619,7 +716,7 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
{
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(tv_enc);
@@ -633,7 +730,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.prepare = nv17_tv_prepare,
.commit = nv17_tv_commit,
.mode_set = nv17_tv_mode_set,
- .detect = nv17_dac_detect,
+ .detect = nv17_tv_detect,
};
static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 18ba74f1970..d6fc0a82f03 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
nouveau_wait_for_idle(dev);
}
+void
+nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+}
+
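An inference about the second half of this helper, matching the "which is NV40_PGRAPH_*" comments deleted below: the RDI writes at indices 0x00EA0010/0x00EA0030/0x00EA0050 + 4 * i appear to mirror the same addr/limit/pitch triple into PGRAPH's internal copy of the tiling state, keeping it in sync with the MMIO registers written just above.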
int
nv20_graph_init(struct drm_device *dev)
{
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
}
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, 0x00400904 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- nv_wr32(dev, 0x00400908 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- /* which is NV40_PGRAPH_TSIZE0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- nv_wr32(dev, 0x00400900 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- /* which is NV40_PGRAPH_TILE0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+
for (i = 0; i < 8; i++) {
nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x4000c0, 0x00000016);
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, 0x00400904 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
- nv_wr32(dev, 0x00400908 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- /* which is NV40_PGRAPH_TSIZE0(i) ?? */
- nv_wr32(dev, 0x00400900 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- /* which is NV40_PGRAPH_TILE0(i) ?? */
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index ca1d27107a8..3cd07d8d5bd 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -3,12 +3,37 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ break;
+
+ default:
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), addr);
+ break;
+ }
+}
+
int
nv40_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fb_bar_size, tmp;
- int num_tiles;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t tmp;
int i;
/* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
case 0x45:
tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
- num_tiles = NV10_PFB_TILE__SIZE;
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
break;
case 0x46: /* G72 */
case 0x47: /* G70 */
case 0x49: /* G71 */
case 0x4b: /* G73 */
case 0x4c: /* C51 (G7X version) */
- num_tiles = NV40_PFB_TILE__SIZE_1;
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
break;
default:
- num_tiles = NV40_PFB_TILE__SIZE_0;
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
break;
}
- fb_bar_size = drm_get_resource_len(dev, 0) - 1;
- switch (dev_priv->chipset) {
- case 0x40:
- for (i = 0; i < num_tiles; i++) {
- nv_wr32(dev, NV10_PFB_TILE(i), 0);
- nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
- }
- break;
- default:
- for (i = 0; i < num_tiles; i++) {
- nv_wr32(dev, NV40_PFB_TILE(i), 0);
- nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
- }
- break;
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index d3e0a2a6acf..53e8afe1dcd 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -24,36 +24,10 @@
*
*/
-#include <linux/firmware.h>
-
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-
-MODULE_FIRMWARE("nouveau/nv40.ctxprog");
-MODULE_FIRMWARE("nouveau/nv40.ctxvals");
-MODULE_FIRMWARE("nouveau/nv41.ctxprog");
-MODULE_FIRMWARE("nouveau/nv41.ctxvals");
-MODULE_FIRMWARE("nouveau/nv42.ctxprog");
-MODULE_FIRMWARE("nouveau/nv42.ctxvals");
-MODULE_FIRMWARE("nouveau/nv43.ctxprog");
-MODULE_FIRMWARE("nouveau/nv43.ctxvals");
-MODULE_FIRMWARE("nouveau/nv44.ctxprog");
-MODULE_FIRMWARE("nouveau/nv44.ctxvals");
-MODULE_FIRMWARE("nouveau/nv46.ctxprog");
-MODULE_FIRMWARE("nouveau/nv46.ctxvals");
-MODULE_FIRMWARE("nouveau/nv47.ctxprog");
-MODULE_FIRMWARE("nouveau/nv47.ctxvals");
-MODULE_FIRMWARE("nouveau/nv49.ctxprog");
-MODULE_FIRMWARE("nouveau/nv49.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
+#include "nouveau_grctx.h"
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
@@ -83,27 +57,30 @@ nv40_graph_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int ret;
- /* Allocate a 175KiB block of PRAMIN to store the context. This
- * is massive overkill for a lot of chipsets, but it should be safe
- * until we're able to implement this properly (will happen at more
- * or less the same time we're able to write our own context programs.
- */
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
if (ret)
return ret;
- ctx = chan->ramin_grctx->gpuobj;
/* Initialise default context values */
dev_priv->engine.instmem.prepare_access(dev, true);
- nv40_grctx_vals_load(dev, ctx);
- nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
- dev_priv->engine.instmem.finish_access(dev);
+ if (!pgraph->ctxprog) {
+ struct nouveau_grctx ctx = {};
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv40_grctx_init(&ctx);
+ } else {
+ nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
+ }
+ nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
+ chan->ramin_grctx->gpuobj->im_pramin->start);
+ dev_priv->engine.instmem.finish_access(dev);
return 0;
}
@@ -204,133 +181,46 @@ nv40_graph_unload_context(struct drm_device *dev)
return ret;
}
-struct nouveau_ctxprog {
- uint32_t signature;
- uint8_t version;
- uint16_t length;
- uint32_t data[];
-} __attribute__ ((packed));
-
-struct nouveau_ctxvals {
- uint32_t signature;
- uint8_t version;
- uint32_t length;
- struct {
- uint32_t offset;
- uint32_t value;
- } data[];
-} __attribute__ ((packed));
-
-int
-nv40_grctx_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- const int chipset = dev_priv->chipset;
- const struct firmware *fw;
- const struct nouveau_ctxprog *cp;
- const struct nouveau_ctxvals *cv;
- char name[32];
- int ret, i;
-
- pgraph->accel_blocked = true;
-
- if (!pgraph->ctxprog) {
- sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
- return ret;
- }
-
- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
- release_firmware(fw);
- return -ENOMEM;
- }
- memcpy(pgraph->ctxprog, fw->data, fw->size);
-
- cp = pgraph->ctxprog;
- if (cp->signature != 0x5043564e || cp->version != 0 ||
- cp->length != ((fw->size - 7) / 4)) {
- NV_ERROR(dev, "ctxprog invalid\n");
- release_firmware(fw);
- nv40_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- if (!pgraph->ctxvals) {
- sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
- nv40_grctx_fini(dev);
- return ret;
- }
-
- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
- release_firmware(fw);
- nv40_grctx_fini(dev);
- return -ENOMEM;
- }
- memcpy(pgraph->ctxvals, fw->data, fw->size);
-
- cv = (void *)pgraph->ctxvals;
- if (cv->signature != 0x5643564e || cv->version != 0 ||
- cv->length != ((fw->size - 9) / 8)) {
- NV_ERROR(dev, "ctxvals invalid\n");
- release_firmware(fw);
- nv40_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- cp = pgraph->ctxprog;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < cp->length; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
-
- pgraph->accel_blocked = false;
- return 0;
-}
-
void
-nv40_grctx_fini(struct drm_device *dev)
+nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-
- if (pgraph->ctxprog) {
- kfree(pgraph->ctxprog);
- pgraph->ctxprog = NULL;
- }
+ uint32_t limit = max(1u, addr + size) - 1;
- if (pgraph->ctxvals) {
- kfree(pgraph->ctxprog);
- pgraph->ctxvals = NULL;
- }
-}
+ if (pitch)
+ addr |= 1;
-void
-nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_ctxvals *cv = pgraph->ctxvals;
- int i;
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x4a:
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ break;
- if (!cv)
- return;
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ break;
- for (i = 0; i < cv->length; i++)
- nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
+ default:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ break;
+ }
}
/*
@@ -347,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv =
(struct drm_nouveau_private *)dev->dev_private;
- uint32_t vramsz, tmp;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t vramsz;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -355,7 +246,26 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- nv40_grctx_init(dev);
+ if (nouveau_ctxfw) {
+ nouveau_grctx_prog_load(dev);
+ dev_priv->engine.graph.grctx_size = 175 * 1024;
+ }
+
+ if (!dev_priv->engine.graph.ctxprog) {
+ struct nouveau_grctx ctx = {};
+ uint32_t cp[256];
+
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 256;
+ nv40_grctx_init(&ctx);
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ }
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -425,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x400b38, 0x2ffff800);
nv_wr32(dev, 0x400b3c, 0x00006000);
- /* copy tile info from PFB */
- switch (dev_priv->chipset) {
- case 0x40: /* vanilla NV40 */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- tmp = nv_rd32(dev, NV10_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- case 0x44:
- case 0x4a:
- case 0x4e: /* NV44-based cores don't have 0x406900? */
- for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- }
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b: /* G7X-based cores */
- for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- default: /* everything else */
- for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
/* begin RAM config */
vramsz = drm_get_resource_len(dev, 0) - 1;
@@ -535,6 +380,7 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
+ nouveau_grctx_fini(dev);
}
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
new file mode 100644
index 00000000000..11b11c31f54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* NVIDIA context programs handle a number of other conditions which are
+ * not implemented in our versions. It's not clear why NVIDIA context
+ * programs have this code, nor whether it's strictly necessary for
+ * correct operation. We'll implement additional handling if/when we
+ * discover it's necessary.
+ *
+ * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state"
+ * flag is set; this gets saved into the context.
+ * - On context save, the context programs for all cards load nsource
+ * into a flag register and check for ILLEGAL_MTHD. If it's set,
+ * opcode 0x60000d is called before resuming normal operation.
+ * - Some context programs check more conditions than the above. NV44
+ * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
+ * and calls 0x60000d before resuming normal operation.
+ * - At the very beginning of NVIDIA's context programs, flag 9 is checked
+ * and if true 0x800001 is called with count=0, pos=0, the flag is cleared
+ * and then the ctxprog is aborted. It looks like a complicated NOP;
+ * its purpose is unknown.
+ * - In the section of code that loads the per-vs state, NVIDIA check
+ * flag 10. If it's set, they only transfer the small 0x300 byte block
+ * of state + the state for a single vs as opposed to the state for
+ * all vs units. It doesn't seem likely that it'll occur in normal
+ * operation, especially seeing as it appears NVIDIA may have screwed
+ * up the ctxprogs for some cards and have an invalid instruction
+ * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
+ * - There's a number of places where context offset 0 (where we place
+ * the PRAMIN offset of the context) is loaded into either 0x408000,
+ * 0x408004 or 0x408008. Not sure what's up there either.
+ * - The ctxprogs for some cards save 0x400a00 again during the cleanup
+ * path for auto-loadctx.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_STATUS ((3 * 32) + 0)
+#define CP_FLAG_STATUS_IDLE 0
+#define CP_FLAG_STATUS_BUSY 1
+#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_UNK54 ((3 * 32) + 6)
+#define CP_FLAG_UNK54_CLEAR 0
+#define CP_FLAG_UNK54_SET 1
+#define CP_FLAG_ALWAYS ((3 * 32) + 8)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_UNK57 ((3 * 32) + 9)
+#define CP_FLAG_UNK57_CLEAR 0
+#define CP_FLAG_UNK57_SET 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000fc000
+#define CP_CTX_COUNT_SHIFT 14
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0000ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEXT_TO_SWAP 0x00600007
+#define CP_NEXT_TO_CURRENT 0x00600009
+#define CP_SET_CONTEXT_POINTER 0x0060000a
+#define CP_END 0x0060000e
+#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
+#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
+#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
+
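These fields compose into 32-bit ctxprog instruction words. A standalone sketch of the branch encoding (the real emitters are the cp_* helpers in nouveau_grctx.h; this only demonstrates the bit packing of the defines above):

#include <stdint.h>

static uint32_t cp_bra_word(uint8_t ip, int if_clear, uint32_t flag)
{
	uint32_t inst = CP_BRA;

	inst |= ((uint32_t)ip << CP_BRA_IP_SHIFT) & CP_BRA_IP;
	if (if_clear)
		inst |= CP_BRA_IF_CLEAR;
	inst |= flag & CP_BRA_FLAG;

	return inst;
}

/* e.g. "branch to ip 0x20 if flag UNK57 ((3 * 32) + 9 == 0x69) is
 * clear" encodes as cp_bra_word(0x20, 1, 0x69) == 0x004020e9. */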
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+/* TODO:
+ * - get vs count from 0x1540
+ * - document unimplemented bits compared to nvidia
+ * - nsource handling
+ * - R0 & 0x0200 handling
+ * - single-vs handling
+ * - 400314 bit 0
+ */
+
+static int
+nv40_graph_4097(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if ((dev_priv->chipset & 0xf0) == 0x60)
+ return 0;
+
+ /* index by the low nibble; shifting by the full chipset id
+ * (0x40 and up) would be undefined behaviour */
+ return !!(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
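For reference, decoding 0x0baf (indexed by the chipset's low nibble, per the fix above) sets bits 0, 1, 2, 3, 5, 7, 8, 9 and 11, i.e. chipsets 0x40, 0x41, 0x42, 0x43, 0x45, 0x47, 0x48, 0x49 and 0x4b. Presumably these are the NV4x cores exposing the 0x4097 class, matching the CP_LOAD_MAGIC_NV40TCL/NV44TCL split used later in this file; the list is inferred from the mask, not stated in the patch.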
+static int
+nv40_graph_vs_count(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ return 8;
+ case 0x40:
+ return 6;
+ case 0x41:
+ case 0x42:
+ return 5;
+ case 0x43:
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ return 3;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ return 1;
+ }
+}
+
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_swap_state3d_3_is_save,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void
+nv40_graph_construct_general(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x4000a4, 1);
+ gr_def(ctx, 0x4000a4, 0x00000008);
+ cp_ctx(ctx, 0x400144, 58);
+ gr_def(ctx, 0x400144, 0x00000001);
+ cp_ctx(ctx, 0x400314, 1);
+ gr_def(ctx, 0x400314, 0x00000000);
+ cp_ctx(ctx, 0x400400, 10);
+ cp_ctx(ctx, 0x400480, 10);
+ cp_ctx(ctx, 0x400500, 19);
+ gr_def(ctx, 0x400514, 0x00040000);
+ gr_def(ctx, 0x400524, 0x55555555);
+ gr_def(ctx, 0x400528, 0x55555555);
+ gr_def(ctx, 0x40052c, 0x55555555);
+ gr_def(ctx, 0x400530, 0x55555555);
+ cp_ctx(ctx, 0x400560, 6);
+ gr_def(ctx, 0x400568, 0x0000ffff);
+ gr_def(ctx, 0x40056c, 0x0000ffff);
+ cp_ctx(ctx, 0x40057c, 5);
+ cp_ctx(ctx, 0x400710, 3);
+ gr_def(ctx, 0x400710, 0x20010001);
+ gr_def(ctx, 0x400714, 0x0f73ef00);
+ cp_ctx(ctx, 0x400724, 1);
+ gr_def(ctx, 0x400724, 0x02008821);
+ cp_ctx(ctx, 0x400770, 3);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x400814, 4);
+ cp_ctx(ctx, 0x400828, 5);
+ cp_ctx(ctx, 0x400840, 5);
+ gr_def(ctx, 0x400850, 0x00000040);
+ cp_ctx(ctx, 0x400858, 4);
+ gr_def(ctx, 0x400858, 0x00000040);
+ gr_def(ctx, 0x40085c, 0x00000040);
+ gr_def(ctx, 0x400864, 0x80000000);
+ cp_ctx(ctx, 0x40086c, 9);
+ gr_def(ctx, 0x40086c, 0x80000000);
+ gr_def(ctx, 0x400870, 0x80000000);
+ gr_def(ctx, 0x400874, 0x80000000);
+ gr_def(ctx, 0x400878, 0x80000000);
+ gr_def(ctx, 0x400888, 0x00000040);
+ gr_def(ctx, 0x40088c, 0x80000000);
+ cp_ctx(ctx, 0x4009c0, 8);
+ gr_def(ctx, 0x4009cc, 0x80000000);
+ gr_def(ctx, 0x4009dc, 0x80000000);
+ } else {
+ cp_ctx(ctx, 0x400840, 20);
+ if (!nv40_graph_4097(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
+ }
+ gr_def(ctx, 0x400880, 0x00000040);
+ gr_def(ctx, 0x400884, 0x00000040);
+ gr_def(ctx, 0x400888, 0x00000040);
+ cp_ctx(ctx, 0x400894, 11);
+ gr_def(ctx, 0x400894, 0x00000040);
+ if (nv40_graph_4097(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
+ }
+ cp_ctx(ctx, 0x4008e0, 2);
+ cp_ctx(ctx, 0x4008f8, 2);
+ if (dev_priv->chipset == 0x4c ||
+ (dev_priv->chipset & 0xf0) == 0x60)
+ cp_ctx(ctx, 0x4009f8, 1);
+ }
+ cp_ctx(ctx, 0x400a00, 73);
+ gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
+ cp_ctx(ctx, 0x401000, 4);
+ cp_ctx(ctx, 0x405004, 1);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x403448, 1);
+ gr_def(ctx, 0x403448, 0x00001010);
+ break;
+ default:
+ cp_ctx(ctx, 0x403440, 1);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x403440, 0x00000010);
+ break;
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ gr_def(ctx, 0x403440, 0x00003010);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ gr_def(ctx, 0x403440, 0x00001010);
+ break;
+ }
+ break;
+ }
+}
+
+static void
+nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401880, 51);
+ gr_def(ctx, 0x401940, 0x00000100);
+ } else
+ if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ cp_ctx(ctx, 0x401880, 32);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
+ if (dev_priv->chipset == 0x46)
+ cp_ctx(ctx, 0x401900, 16);
+ cp_ctx(ctx, 0x401940, 3);
+ }
+ cp_ctx(ctx, 0x40194c, 18);
+ gr_def(ctx, 0x401954, 0x00000111);
+ gr_def(ctx, 0x401958, 0x00080060);
+ gr_def(ctx, 0x401974, 0x00000080);
+ gr_def(ctx, 0x401978, 0xffff0000);
+ gr_def(ctx, 0x40197c, 0x00000001);
+ gr_def(ctx, 0x401990, 0x46400000);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x4019a0, 2);
+ cp_ctx(ctx, 0x4019ac, 5);
+ } else {
+ cp_ctx(ctx, 0x4019a0, 1);
+ cp_ctx(ctx, 0x4019b4, 3);
+ }
+ gr_def(ctx, 0x4019bc, 0xffff0000);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x4019c0, 18);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
+ break;
+ }
+ cp_ctx(ctx, 0x401a08, 8);
+ gr_def(ctx, 0x401a10, 0x0fff0000);
+ gr_def(ctx, 0x401a14, 0x0fff0000);
+ gr_def(ctx, 0x401a1c, 0x00011100);
+ cp_ctx(ctx, 0x401a2c, 4);
+ cp_ctx(ctx, 0x401a44, 26);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
+ gr_def(ctx, 0x401a8c, 0x4b7fffff);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401ab8, 3);
+ } else {
+ cp_ctx(ctx, 0x401ab8, 1);
+ cp_ctx(ctx, 0x401ac0, 1);
+ }
+ cp_ctx(ctx, 0x401ad0, 8);
+ gr_def(ctx, 0x401ad0, 0x30201000);
+ gr_def(ctx, 0x401ad4, 0x70605040);
+ gr_def(ctx, 0x401ad8, 0xb8a89888);
+ gr_def(ctx, 0x401adc, 0xf8e8d8c8);
+ cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+ gr_def(ctx, 0x401b10, 0x40100000);
+ cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
+ gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+ 0x00000004 : 0x00000000);
+ cp_ctx(ctx, 0x401b30, 25);
+ gr_def(ctx, 0x401b34, 0x0000ffff);
+ gr_def(ctx, 0x401b68, 0x435185d6);
+ gr_def(ctx, 0x401b6c, 0x2155b699);
+ gr_def(ctx, 0x401b70, 0xfedcba98);
+ gr_def(ctx, 0x401b74, 0x00000098);
+ gr_def(ctx, 0x401b84, 0xffffffff);
+ gr_def(ctx, 0x401b88, 0x00ff7000);
+ gr_def(ctx, 0x401b8c, 0x0000ffff);
+ if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
+ dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x401b94, 1);
+ cp_ctx(ctx, 0x401b98, 8);
+ gr_def(ctx, 0x401b9c, 0x00ff0000);
+ cp_ctx(ctx, 0x401bc0, 9);
+ gr_def(ctx, 0x401be0, 0x00ffff00);
+ cp_ctx(ctx, 0x401c00, 192);
+ for (i = 0; i < 16; i++) { /* fragment texture units */
+ gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
+ gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
+ gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
+ gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
+ gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
+ gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
+ }
+ for (i = 0; i < 4; i++) { /* vertex texture units */
+ gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
+ gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
+ gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
+ gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
+ }
+ cp_ctx(ctx, 0x400f5c, 3);
+ gr_def(ctx, 0x400f5c, 0x00000002);
+ cp_ctx(ctx, 0x400f84, 1);
+}
+
+static void
+nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x402000, 1);
+ cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402404, 0x00000001);
+ break;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ gr_def(ctx, 0x402404, 0x00000020);
+ break;
+ case 0x46:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402404, 0x00000421);
+ break;
+ default:
+ gr_def(ctx, 0x402404, 0x00000021);
+ }
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402408, 0x030c30c3);
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ cp_ctx(ctx, 0x402440, 1);
+ gr_def(ctx, 0x402440, 0x00011001);
+ break;
+ default:
+ break;
+ }
+ cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+ gr_def(ctx, 0x402488, 0x3e020200);
+ gr_def(ctx, 0x40248c, 0x00ffffff);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402490, 0x60103f00);
+ break;
+ case 0x47:
+ gr_def(ctx, 0x402490, 0x40103f00);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402490, 0x20103f00);
+ break;
+ default:
+ gr_def(ctx, 0x402490, 0x0c103f00);
+ break;
+ }
+ gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+ 0x00020000 : 0x00040000);
+ cp_ctx(ctx, 0x402500, 31);
+ gr_def(ctx, 0x402530, 0x00008100);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x40257c, 6);
+ cp_ctx(ctx, 0x402594, 16);
+ cp_ctx(ctx, 0x402800, 17);
+ gr_def(ctx, 0x402800, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402864, 1);
+ gr_def(ctx, 0x402864, 0x00001001);
+ cp_ctx(ctx, 0x402870, 3);
+ gr_def(ctx, 0x402878, 0x00000003);
+ if (dev_priv->chipset != 0x47) { /* belong at end!! */
+ cp_ctx(ctx, 0x402900, 1);
+ cp_ctx(ctx, 0x402940, 1);
+ cp_ctx(ctx, 0x402980, 1);
+ cp_ctx(ctx, 0x4029c0, 1);
+ cp_ctx(ctx, 0x402a00, 1);
+ cp_ctx(ctx, 0x402a40, 1);
+ cp_ctx(ctx, 0x402a80, 1);
+ cp_ctx(ctx, 0x402ac0, 1);
+ }
+ break;
+ case 0x40:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00000001);
+ cp_ctx(ctx, 0x402850, 1);
+ break;
+ default:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00001001);
+ cp_ctx(ctx, 0x402850, 2);
+ gr_def(ctx, 0x402854, 0x00000003);
+ break;
+ }
+
+ cp_ctx(ctx, 0x402c00, 4);
+ gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+ 0x80800001 : 0x00888001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402c20, 40);
+ for (i = 0; i < 32; i++)
+ gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
+ cp_ctx(ctx, 0x4030b8, 13);
+ gr_def(ctx, 0x4030dc, 0x00000005);
+ gr_def(ctx, 0x4030e8, 0x0000ffff);
+ break;
+ default:
+ cp_ctx(ctx, 0x402c10, 4);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x402c20, 36);
+ else
+ if (dev_priv->chipset <= 0x42)
+ cp_ctx(ctx, 0x402c20, 24);
+ else
+ if (dev_priv->chipset <= 0x4a)
+ cp_ctx(ctx, 0x402c20, 16);
+ else
+ cp_ctx(ctx, 0x402c20, 8);
+ cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+ gr_def(ctx, 0x402cd4, 0x00000005);
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402ce0, 0x0000ffff);
+ break;
+ }
+
+ cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
+ for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+ gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
+
+ if (dev_priv->chipset != 0x40) {
+ cp_ctx(ctx, 0x403600, 1);
+ gr_def(ctx, 0x403600, 0x00000001);
+ }
+ cp_ctx(ctx, 0x403800, 1);
+
+ cp_ctx(ctx, 0x403c18, 1);
+ gr_def(ctx, 0x403c18, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x405018, 1);
+ gr_def(ctx, 0x405018, 0x08e00001);
+ cp_ctx(ctx, 0x405c24, 1);
+ gr_def(ctx, 0x405c24, 0x000e3000);
+ break;
+ }
+ if (dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x405800, 11);
+ cp_ctx(ctx, 0x407000, 1);
+}
+
+static void
+nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
+{
+ int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+
+ cp_out (ctx, 0x300000);
+ cp_lsr (ctx, len - 4);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
+ cp_lsr (ctx, len);
+ cp_name(ctx, cp_swap_state3d_3_is_save);
+ cp_out (ctx, 0x800001);
+
+ ctx->ctxvals_pos += len;
+}
+
+static void
+nv40_graph_construct_shader(struct nouveau_grctx *ctx)
+{
+ struct drm_device *dev = ctx->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = ctx->data;
+ int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
+ int offset, i;
+
+ vs_nr = nv40_graph_vs_count(ctx->dev);
+ vs_nr_b0 = 363;
+ vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
+ if (dev_priv->chipset == 0x40) {
+ b0_offset = 0x2200/4; /* 33a0 */
+ b1_offset = 0x55a0/4; /* 1500 */
+ vs_len = 0x6aa0/4;
+ } else
+ if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+ b0_offset = 0x2200/4; /* 2200 */
+ b1_offset = 0x4400/4; /* 0b00 */
+ vs_len = 0x4f00/4;
+ } else {
+ b0_offset = 0x1d40/4; /* 2200 */
+ b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
+ vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+ }
+
+ cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
+ cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+
+ offset = ctx->ctxvals_pos;
+ ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
+
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ offset += 0x0280/4;
+ for (i = 0; i < 16; i++, offset += 2)
+ nv_wo32(dev, obj, offset, 0x3f800000);
+
+ for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
+ for (i = 0; i < vs_nr_b0 * 6; i += 6)
+ nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+ for (i = 0; i < vs_nr_b1 * 4; i += 4)
+ nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+ }
+}
+
+void
+nv40_grctx_init(struct nouveau_grctx *ctx)
+{
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_name(ctx, cp_setup_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_out (ctx, 0x00910880); /* ?? */
+ cp_out (ctx, 0x00901ffe); /* ?? */
+ cp_out (ctx, 0x01940000); /* ?? */
+ cp_lsr (ctx, 0x20);
+ cp_out (ctx, 0x0060000b); /* ?? */
+ cp_wait(ctx, UNK57, CLEAR);
+ cp_out (ctx, 0x0060000c); /* ?? */
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_pos (ctx, 0x00020/4);
+ nv40_graph_construct_general(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 1 */
+ cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
+ nv40_graph_construct_state3d(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 2 */
+ nv40_graph_construct_state3d_2(ctx);
+
+ /* Some other block of "random" state */
+ nv40_graph_construct_state3d_3(ctx);
+
+ /* Per-vertex shader state */
+ cp_pos (ctx, ctx->ctxvals_pos);
+ nv40_graph_construct_shader(ctx);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+}
+
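Taken together with the call sites earlier in this patch, the generator runs twice over the same register description: once in PROG mode to emit the ucode, then once per channel in VALS mode to seed defaults. A condensed sketch (names exactly as used in nv40_graph.c above):

	struct nouveau_grctx ctx = {};
	uint32_t cp[256];

	/* Pass 1 (nv40_graph_init): emit the context program itself. */
	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 256;
	nv40_grctx_init(&ctx);
	/* ...then upload cp[0..ctx.ctxprog_len) via CTXCTL_UCODE_*. */

	/* Pass 2 (nv40_graph_create_context, per channel): the same
	 * walk, but writing default values into the channel's PRAMIN
	 * context object instead of emitting instructions. */
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = chan->ramin_grctx->gpuobj;
	nv40_grctx_init(&ctx);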
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index f8e28a1e44e..d1a651e3400 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -45,7 +45,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
int i;
- NV_DEBUG(crtc->dev, "\n");
+ NV_DEBUG_KMS(crtc->dev, "\n");
for (i = 0; i < 256; i++) {
writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
@@ -68,8 +68,8 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
struct nouveau_channel *evo = dev_priv->evo;
int index = nv_crtc->index, ret;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
- NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
if (blanked) {
nv_crtc->cursor.hide(nv_crtc, false);
@@ -139,7 +139,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
struct nouveau_channel *evo = dev_priv->evo;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
if (ret) {
@@ -193,7 +193,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
uint32_t outX, outY, horiz, vert;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
switch (scaling_mode) {
case DRM_MODE_SCALE_NONE:
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG(dev, "\n");
+ struct drm_device *dev;
+ struct nouveau_crtc *nv_crtc;
if (!crtc)
return;
+ dev = crtc->dev;
+ nv_crtc = nouveau_crtc(crtc);
+
+ NV_DEBUG_KMS(dev, "\n");
+
drm_crtc_cleanup(&nv_crtc->base);
nv50_cursor_fini(nv_crtc);
@@ -432,16 +435,36 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ uint32_t dac = 0, sor = 0;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
/* Disconnect all unused encoders. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (drm_helper_encoder_in_use(encoder))
+ if (!drm_helper_encoder_in_use(encoder))
continue;
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+ nv_encoder->dcb->type == OUTPUT_TV)
+ dac |= (1 << nv_encoder->or);
+ else
+ sor |= (1 << nv_encoder->or);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+ nv_encoder->dcb->type == OUTPUT_TV) {
+ if (dac & (1 << nv_encoder->or))
+ continue;
+ } else {
+ if (sor & (1 << nv_encoder->or))
+ continue;
+ }
+
nv_encoder->disconnect(nv_encoder);
}
@@ -458,7 +481,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
@@ -497,7 +520,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
int ret, format;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
switch (drm_fb->depth) {
case 8:
@@ -612,7 +635,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*nv_crtc->mode = *adjusted_mode;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
@@ -706,7 +729,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
struct nouveau_crtc *nv_crtc = NULL;
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
if (!nv_crtc)
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index e2e79a8f220..753e723adb3 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -41,7 +41,7 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
struct drm_device *dev = nv_crtc->base.dev;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
if (update && nv_crtc->cursor.visible)
return;
@@ -76,7 +76,7 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
struct drm_device *dev = nv_crtc->base.dev;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
if (update && !nv_crtc->cursor.visible)
return;
@@ -116,7 +116,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
static void
nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
{
- NV_DEBUG(nv_crtc->base.dev, "\n");
+ NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
if (offset == nv_crtc->cursor.offset)
return;
@@ -143,7 +143,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
struct drm_device *dev = nv_crtc->base.dev;
int idx = nv_crtc->index;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index fb5838e3be2..f08f042a8e1 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -44,7 +44,7 @@ nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
struct nouveau_channel *evo = dev_priv->evo;
int ret;
- NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -81,11 +81,11 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
/* Use bios provided value if possible. */
if (dev_priv->vbios->dactestval) {
load_pattern = dev_priv->vbios->dactestval;
- NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
+ NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
load_pattern);
} else {
load_pattern = 340;
- NV_DEBUG(dev, "Using default load_pattern of %d\n",
+ NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
load_pattern);
}
@@ -103,9 +103,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
status = connector_status_connected;
if (status == connector_status_connected)
- NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
+ NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
else
- NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);
+ NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
return status;
}
@@ -118,7 +118,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
uint32_t val;
int or = nv_encoder->or;
- NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+ NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
/* wait for it to be done */
if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
@@ -173,7 +173,7 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *connector;
- NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
connector = nouveau_encoder_connector_get(nv_encoder);
if (!connector) {
@@ -213,7 +213,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
- NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -264,7 +264,7 @@ nv50_dac_destroy(struct drm_encoder *encoder)
if (!encoder)
return;
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
@@ -280,7 +280,7 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
NV_INFO(dev, "Detected a DAC output\n");
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 12c5ee63495..90f0bf59fbc 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -188,7 +188,7 @@ nv50_display_init(struct drm_device *dev)
uint64_t start;
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
/*
@@ -232,7 +232,7 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
/* RAM is clamped to 256 MiB. */
ram_amount = nouveau_mem_fb_amount(dev);
- NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
+ NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
if (ram_amount > 256*1024*1024)
ram_amount = 256*1024*1024;
nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
@@ -398,7 +398,7 @@ static int nv50_display_disable(struct drm_device *dev)
struct drm_crtc *drm_crtc;
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
@@ -469,7 +469,7 @@ int nv50_display_create(struct drm_device *dev)
uint32_t connector[16] = {};
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@@ -573,7 +573,7 @@ int nv50_display_destroy(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
@@ -617,7 +617,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
* CRTC separately, and submission will be blocked by the GPU
* until we handle each in turn.
*/
- NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
head = ffs((unk30 >> 9) & 3) - 1;
if (head < 0)
return -EINVAL;
@@ -661,7 +661,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
or = i;
}
- NV_DEBUG(dev, "type %d, or %d\n", type, or);
+ NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
if (type == OUTPUT_ANY) {
NV_ERROR(dev, "unknown encoder!!\n");
return -1;
@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
int pxclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = NULL;
+ struct drm_encoder *encoder;
struct nvbios *bios = &dev_priv->VBIOS;
uint32_t mc, script = 0, or;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb != dcbent)
+ continue;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ break;
+ }
+
or = ffs(dcbent->or) - 1;
mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
} else
if (bios->fp.strapless_is_24bit & 1)
script |= 0x0200;
+
+ if (nv_connector && nv_connector->edid &&
+ (nv_connector->edid->revision >= 4) &&
+ (nv_connector->edid->input & 0x70) >= 0x20)
+ script |= 0x0200;
}
if (nouveau_uscript_lvds >= 0) {
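The added EDID test above keys off the EDID 1.4 video-input byte: for revision >= 4, bits 6:4 of that byte encode bits per primary color, so a field value of 0x20 or more should mean an 8 bpc (24-bit) panel, and the 24-bit LVDS script bit (0x0200) gets set. A small sketch of the decode (the bpc encoding is my reading of EDID 1.4, not something stated in the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool edid_panel_is_24bit(uint8_t revision, uint8_t input)
{
	if (revision < 4)
		return false;		/* field undefined before EDID 1.4 */
	return (input & 0x70) >= 0x20;	/* >= 8 bits per primary color */
}

int main(void)
{
	printf("%d\n", edid_panel_is_24bit(4, 0xa5));	/* field 0x20 -> 1 */
	printf("%d\n", edid_panel_is_24bit(4, 0x95));	/* field 0x10 -> 0 */
	printf("%d\n", edid_panel_is_24bit(3, 0xa5));	/* old EDID   -> 0 */
	return 0;
}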
@@ -811,7 +828,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
script = nv50_display_script_select(dev, dcbent, pclk);
- NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);
+ NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
if (dcbent->type != OUTPUT_DP)
nouveau_bios_run_display_table(dev, dcbent, 0, -2);
@@ -870,7 +887,7 @@ nv50_display_irq_handler_bh(struct work_struct *work)
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
- NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+ NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
nv50_display_unk10_handler(dev);
@@ -974,7 +991,7 @@ nv50_display_irq_handler(struct drm_device *dev)
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
uint32_t clock;
- NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+ NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
if (!intr0 && !(intr1 & ~delayed))
break;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bcc6d39e9b..0f57cdf7ccb 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
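The repeated "GPU lockup" NV_ERROR plus FBINFO_HWACCEL_DISABLED sequence is folded into nouveau_fbcon_gpu_lockup() here; the helper's body is not part of this hunk, but a plausible shape, as a standalone sketch with stand-in names and flag values, is simply to centralise the log-and-fall-back step:

#include <stdio.h>

#define HWACCEL_DISABLED 0x1	/* stand-in for FBINFO_HWACCEL_DISABLED */

struct fb_info { unsigned flags; };

static void fbcon_gpu_lockup(struct fb_info *info)
{
	fprintf(stderr, "GPU lockup - switching to software fbcon\n");
	info->flags |= HWACCEL_DISABLED;
}

int main(void)
{
	struct fb_info info = { 0 };

	/* on a failed ring-space reservation, drop to software paths */
	fbcon_gpu_lockup(&info);
	if (info.flags & HWACCEL_DISABLED)
		puts("using cfb_* software fallbacks");
	return 0;
}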
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, 1);
}
BEGIN_RING(chan, NvSub2D, 0x0588, 1);
- OUT_RING(chan, rect->color);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING(chan, rect->color);
BEGIN_RING(chan, NvSub2D, 0x0600, 4);
OUT_RING(chan, rect->dx);
OUT_RING(chan, rect->dy);
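The fillrect colour fix above accounts for fb visuals: with FB_VISUAL_TRUECOLOR or FB_VISUAL_DIRECTCOLOR, rect->color is an index into the driver's 16-entry info->pseudo_palette rather than a raw pixel value, so it must be looked up before being handed to the 2D engine. A standalone sketch of the lookup:

#include <stdint.h>
#include <stdio.h>

enum visual { VISUAL_PSEUDOCOLOR, VISUAL_TRUECOLOR, VISUAL_DIRECTCOLOR };

static uint32_t fill_color(enum visual v, const uint32_t pseudo_palette[16],
			   uint32_t color)
{
	if (v == VISUAL_TRUECOLOR || v == VISUAL_DIRECTCOLOR)
		return pseudo_palette[color];	/* index -> pixel value */
	return color;				/* already a pixel value */
}

int main(void)
{
	uint32_t palette[16] = { [1] = 0x00ffffff /* white in ARGB8888 */ };

	printf("0x%08x\n", (unsigned)fill_color(VISUAL_TRUECOLOR, palette, 1));
	printf("0x%08x\n", (unsigned)fill_color(VISUAL_PSEUDOCOLOR, palette, 1));
	return 0;
}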
@@ -44,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int push = dwords > 2047 ? 2047 : dwords;
if (RING_SPACE(chan, push + 1)) {
- NV_ERROR(dev,
- "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
return;
}
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
ret = RING_SPACE(chan, 59);
if (ret) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ nouveau_fbcon_gpu_lockup(info);
return ret;
}
@@ -265,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
dev_priv->vm_vram_base);
- info->fbops->fb_fillrect = nv50_fbcon_fillrect;
- info->fbops->fb_copyarea = nv50_fbcon_copyarea;
- info->fbops->fb_imageblit = nv50_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 77ae1aaa0bc..204a79ff10f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
return ret;
ramfc = chan->ramfc->gpuobj;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
0, &chan->cache);
if (ret)
return ret;
@@ -317,17 +317,20 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+ /* This will ensure the channel is seen as disabled. */
+ chan->ramfc = NULL;
nv50_fifo_channel_disable(dev, chan->id, false);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
nv50_fifo_channel_disable(dev, 127, false);
+
+ nouveau_gpuobj_ref_del(dev, &ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->cache);
}
int
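The destroy_context() reordering above publishes the "channel is dead" state (chan->ramfc = NULL) before the hardware disable runs, so anything inspecting the channel during the disable sees it as gone, while a local ramfc pointer keeps the object reachable for the deferred free. A standalone sketch of that ordering:

#include <stdio.h>
#include <stdlib.h>

struct gpuobj { int id; };
struct channel { struct gpuobj *ramfc; };

static void channel_disable(struct channel *chan)
{
	/* code running here must see the channel as already dead */
	printf("disable: ramfc %s\n", chan->ramfc ? "live!" : "NULL, ok");
}

static void destroy_context(struct channel *chan)
{
	struct gpuobj *ramfc = chan->ramfc;	/* keep for the free */

	chan->ramfc = NULL;	/* mark disabled first ... */
	channel_disable(chan);	/* ... then poke the hardware ... */
	free(ramfc);		/* ... then release the object */
}

int main(void)
{
	struct channel chan = { .ramfc = calloc(1, sizeof(struct gpuobj)) };
	destroy_context(&chan);
	return 0;
}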
@@ -384,8 +387,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
nv_ro32(dev, cache, (ptr * 2) + 1));
}
- nv_wr32(dev, 0x3210, cnt << 2);
- nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
if (!IS_G80) {
@@ -398,8 +401,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0;
}
@@ -416,7 +417,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
NV_DEBUG(dev, "\n");
chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
return 0;
chan = dev_priv->fifos[chid];
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 177d8229336..6d504801b51 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x401804, 0xc0000000);
+ nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
@@ -107,9 +107,13 @@ nv50_graph_init_regs(struct drm_device *dev)
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
NV_DEBUG(dev, "\n");
- nv40_grctx_init(dev);
+ nouveau_grctx_prog_load(dev);
+ if (!dev_priv->engine.graph.ctxprog)
+ dev_priv->engine.graph.accel_blocked = true;
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -140,7 +144,7 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
- nv40_grctx_fini(dev);
+ nouveau_grctx_fini(dev);
}
void
@@ -161,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
uint32_t inst;
int i;
+ /* Be sure we're not in the middle of a context switch or bad things
+ * will happen, such as unloading the wrong pgraph context.
+ */
+ if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "Ctxprog is still running\n");
+
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return NULL;
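The added guard relies on nv_wait(), which polls a register until (value & mask) == val or a timeout expires; here it waits for bit 0 of 0x400300 to clear, i.e. for the context-switching microcode to go idle, before CTXCTL_CUR can be trusted. A rough standalone sketch of the poll-with-timeout pattern (the register read is simulated, and the interval and timeout values are assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t reg_read(uint32_t reg)
{
	(void)reg;
	return 0;	/* simulate "ctxprog idle" (bit 0 clear) */
}

static bool nv_wait_sketch(uint32_t reg, uint32_t mask, uint32_t val)
{
	struct timespec ts = { 0, 1000000 };	/* 1 ms poll interval */

	for (int i = 0; i < 2000; i++) {	/* ~2 s timeout */
		if ((reg_read(reg) & mask) == val)
			return true;
		nanosleep(&ts, NULL);
	}
	return false;
}

int main(void)
{
	if (!nv_wait_sketch(0x400300, 0x00000001, 0x00000000))
		fprintf(stderr, "Ctxprog is still running\n");
	else
		puts("ctxprog idle");
	return 0;
}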
@@ -207,7 +217,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
dev_priv->engine.instmem.prepare_access(dev, true);
- nv40_grctx_vals_load(dev, ctx);
+ nouveau_grctx_vals_load(dev, ctx);
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
if ((dev_priv->chipset & 0xf0) == 0xa0)
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
@@ -271,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan)
int
nv50_graph_unload_context(struct drm_device *dev)
{
- uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+ uint32_t inst;
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return 0;
inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
- nv_wr32(dev, 0x400500, fifo & ~1);
+ nouveau_wait_for_idle(dev);
nv_wr32(dev, 0x400784, inst);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400500, fifo);
nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7..f0dc4e36ef0 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+ else
+ dev_priv->vram_sys_base = 0;
+
/* Reserve the last MiB of VRAM, we should probably try to avoid
* setting up the below tables over the top of the VBIOS image at
* some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
* We map the entire fake channel into the start of the PRAMIN BAR
*/
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
- 0, &priv->pramin_pt);
+ 0, &priv->pramin_pt);
if (ret)
return ret;
- for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
- if (v < (c_offset + c_size))
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
- else
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ v = c_offset | 1;
+ if (dev_priv->vram_sys_base) {
+ v += dev_priv->vram_sys_base;
+ v |= 0x30;
+ }
+
+ i = 0;
+ while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ v += 0x1000;
+ i += 8;
+ }
+
+ while (i < pt_size) {
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ i += 8;
}
BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
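The rewritten loop above reflects the IGP case read back just before it: on chipsets 0xaa/0xac the "VRAM" carveout lives in system memory at vram_sys_base, so valid PTEs get that base added and the 0x30 target bits set, bit 0 marks the entry present, and the remainder of the table is zero-filled rather than written with the old 0x00000009 pattern. A standalone sketch of the same fill, each 8-byte PTE written as two 32-bit words (addresses and sizes are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE	0x1000u

int main(void)
{
	uint32_t pt[8];			/* room for 4 PTEs */
	uint64_t vram_sys_base = 0;	/* nonzero on 0xaa/0xac IGPs */
	uint64_t c_offset = 0x2000, c_size = 2 * PAGE;
	uint64_t v = c_offset | 1;	/* bit 0: entry present */
	unsigned i = 0;

	if (vram_sys_base) {
		v += vram_sys_base;
		v |= 0x30;		/* system-memory target */
	}

	while (v < vram_sys_base + c_offset + c_size) {
		pt[i + 0] = (uint32_t)v;	/* low word: addr | flags */
		pt[i + 1] = 0;			/* high word */
		v += PAGE;
		i += 2;
	}
	while (i < 8)
		pt[i++] = 0;		/* invalid entries are all zero */

	for (unsigned j = 0; j < 8; j += 2)
		printf("pte[%u] = 0x%08" PRIx32 " 0x%08" PRIx32 "\n",
		       j / 2, pt[j + 1], pt[j]);
	return 0;
}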
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- uint32_t pte, pte_end, vram;
+ struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+ uint32_t pte, pte_end;
+ uint64_t vram;
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
- pte = (gpuobj->im_pramin->start >> 12) << 3;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ pte = (gpuobj->im_pramin->start >> 12) << 1;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
vram = gpuobj->im_backing_start;
NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ vram |= 1;
+ if (dev_priv->vram_sys_base) {
+ vram += dev_priv->vram_sys_base;
+ vram |= 0x30;
+ }
+
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
- pte += 8;
+ nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+ nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
vram += NV50_INSTMEM_PAGE_SIZE;
}
dev_priv->engine.instmem.finish_access(dev);
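The bind() change above switches the PTE cursor from a byte offset (index << 3) to a 32-bit-word offset (index << 1) and widens the target address to 64 bits, storing lower_32_bits()/upper_32_bits() in consecutive words so system-memory targets above 4 GiB can be expressed. A standalone sketch of the packing:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE	0x1000u

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint32_t pt[8];
	uint64_t start = 0x0000, size = 2 * PAGE;
	uint64_t vram = 0x123456789000ull | 1;	/* present bit set */
	uint32_t pte = (uint32_t)((start >> 12) << 1);
	uint32_t pte_end = (uint32_t)(((size >> 12) << 1) + pte);

	while (pte < pte_end) {
		pt[pte++] = lower_32_bits(vram);	/* low word  */
		pt[pte++] = upper_32_bits(vram);	/* high word */
		vram += PAGE;
	}

	for (unsigned i = 0; i < pte_end; i += 2)
		printf("pte[%u] = 0x%08" PRIx32 "%08" PRIx32 "\n",
		       i / 2, pt[i + 1], pt[i]);
	return 0;
}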
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound == 0)
return -EINVAL;
- pte = (gpuobj->im_pramin->start >> 12) << 3;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ pte = (gpuobj->im_pramin->start >> 12) << 1;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
- pte += 8;
+ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
}
dev_priv->engine.instmem.finish_access(dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 8c280463a66..c2fff543b06 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -44,7 +44,7 @@ nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
struct nouveau_channel *evo = dev_priv->evo;
int ret;
- NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -70,7 +70,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
}
if (dpe->script0) {
- NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
nv_encoder->dcb);
}
@@ -79,7 +79,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
if (dpe->script1) {
- NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
nv_encoder->dcb);
}
@@ -90,10 +90,24 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder *enc;
uint32_t val;
int or = nv_encoder->or;
- NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+ NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+
+ nv_encoder->last_dpms = mode;
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nvenc = nouveau_encoder(enc);
+
+ if (nvenc == nv_encoder ||
+ nvenc->disconnect != nv50_sor_disconnect ||
+ nvenc->dcb->or != nv_encoder->dcb->or)
+ continue;
+
+ if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
+ return;
+ }
/* wait for it to be done */
if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
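The DPMS hunk above handles ORs shared between SOR encoders: the requested state is recorded in last_dpms, and if any other encoder on the same OR is still DRM_MODE_DPMS_ON the function returns without touching the hardware, so only the last user powers the OR down. A standalone sketch of the guard:

#include <stdio.h>

enum { DPMS_ON, DPMS_OFF };

struct encoder { int or; int last_dpms; };

static void sor_dpms(struct encoder *enc, struct encoder *all, int n,
		     int mode)
{
	enc->last_dpms = mode;
	for (int i = 0; i < n; i++) {
		struct encoder *other = &all[i];

		if (other == enc || other->or != enc->or)
			continue;
		if (other->last_dpms == DPMS_ON)
			return;	/* OR still in use elsewhere */
	}
	printf("OR %d: program DPMS %s\n", enc->or,
	       mode == DPMS_ON ? "on" : "off");
}

int main(void)
{
	struct encoder enc[2] = {
		{ .or = 1, .last_dpms = DPMS_ON },
		{ .or = 1, .last_dpms = DPMS_ON },
	};

	sor_dpms(&enc[0], enc, 2, DPMS_OFF);	/* sibling still on: no-op */
	sor_dpms(&enc[1], enc, 2, DPMS_OFF);	/* last user: powers down */
	return 0;
}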
@@ -142,7 +156,7 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *connector;
- NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
connector = nouveau_encoder_connector_get(nv_encoder);
if (!connector) {
@@ -182,7 +196,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0;
int ret;
- NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -246,7 +260,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
if (!encoder)
return;
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
@@ -265,7 +279,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
bool dum;
int type;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
switch (entry->type) {
case OUTPUT_TMDS:
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 601f4c0e5da..b806fdcc717 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -64,7 +64,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676eee8..51c99fc4dd3 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
&init->agp_textures_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
}
typedef struct drm_r128_depth32 {
@@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
&depth->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
}
@@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
&stipple->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
}
typedef struct drm_r128_getparam32 {
@@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
&getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
}
drm_ioctl_compat_t *r128_compat_ioctls[] = {
@@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
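The pattern above recurs across these compat shims: drm_ioctl() now takes (file, cmd, arg) instead of (inode, file, cmd, arg), drivers register it as .unlocked_ioctl, and the lock_kernel()/unlock_kernel() (Big Kernel Lock) wrapping is dropped because the core path does its own locking. A standalone sketch of the table dispatch with fall-through to the core handler (names are stand-ins):

#include <stdio.h>

#define COMMAND_BASE	0x40
typedef long (*ioctl_fn)(unsigned int cmd, unsigned long arg);

static long core_ioctl(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	printf("core ioctl 0x%x\n", cmd);
	return 0;
}

static long compat_init(unsigned int cmd, unsigned long arg)
{
	(void)arg;
	printf("compat init 0x%x\n", cmd);
	return 0;
}

static ioctl_fn compat_table[] = { compat_init };
#define NCOMPAT (sizeof(compat_table) / sizeof(compat_table[0]))

static long compat_ioctl(unsigned int cmd, unsigned long arg)
{
	unsigned int nr = cmd & 0xff;
	ioctl_fn fn = NULL;

	if (nr >= COMMAND_BASE && nr < COMMAND_BASE + NCOMPAT)
		fn = compat_table[nr - COMMAND_BASE];

	return fn ? fn(cmd, arg) : core_ioctl(cmd, arg);
}

int main(void)
{
	compat_ioctl(0x40, 0);	/* hits the compat table */
	compat_ioctl(0x10, 0);	/* falls through to the core */
	return 0;
}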
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d..1c02d23f6fc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default"
+ bool "Enable modesetting on radeon by default - NEW DRIVER"
depends on DRM_RADEON
help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain.
+ Choose this option if you want kernel modesetting enabled by default.
+
+ This is a completely new driver. It's only part of the existing drm
+ for compatibility reasons. It requires an entirely different graphics
+ stack above it and works very differently from the old drm stack.
+ In other words, don't enable this unless you know what you are doing;
+ it may cause issues or bugs compared to the previous userspace driver
+ stack.

When kernel modesetting is enabled the IOCTLs of the radeon/drm
driver are considered invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index feb52eee431..1cc7b937b1e 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
$(obj)/r300.o: $(obj)/r300_reg_safe.h
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
@@ -49,7 +54,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index 6d0183c61d3..c714179d1bf 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -1,5 +1,5 @@
/*
-* Copyright 2006-2007 Advanced Micro Devices, Inc.
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,14 +41,14 @@
/****************************************************/
/* Encoder Object ID Definition */
/****************************************************/
-#define ENCODER_OBJECT_ID_NONE 0x00
+#define ENCODER_OBJECT_ID_NONE 0x00
/* Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
-#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
@@ -56,11 +56,11 @@
#define ENCODER_OBJECT_ID_SI170B 0x08
#define ENCODER_OBJECT_ID_CH7303 0x09
#define ENCODER_OBJECT_ID_CH7301 0x0A
-#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
#define ENCODER_OBJECT_ID_TITFP513 0x0E
-#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
@@ -68,9 +68,9 @@
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
-#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
-#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
-#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
#define ENCODER_OBJECT_ID_VT1625 0x1A
#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
@@ -86,7 +86,7 @@
/****************************************************/
/* Connector Object ID Definition */
/****************************************************/
-#define CONNECTOR_OBJECT_ID_NONE 0x00
+#define CONNECTOR_OBJECT_ID_NONE 0x00
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
@@ -96,7 +96,7 @@
#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
#define CONNECTOR_OBJECT_ID_YPbPr 0x08
#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
-#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
#define CONNECTOR_OBJECT_ID_SCART 0x0B
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
@@ -106,6 +106,8 @@
#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
+#define CONNECTOR_OBJECT_ID_eDP 0x14
+#define CONNECTOR_OBJECT_ID_MXM 0x15
/* deleted */
@@ -116,6 +118,14 @@
#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
/****************************************************/
+/* Generic Object ID Definition */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE 0x00
+#define GENERIC_OBJECT_ID_GLSYNC 0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
+#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+
+/****************************************************/
/* Graphics Object ENUM ID Definition */
/****************************************************/
#define GRAPH_OBJECT_ENUM_ID1 0x01
@@ -124,6 +134,7 @@
#define GRAPH_OBJECT_ENUM_ID4 0x04
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
+#define GRAPH_OBJECT_ENUM_ID7 0x07
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -133,35 +144,35 @@
#define RESERVED1_ID_MASK 0x0800
#define OBJECT_TYPE_MASK 0x7000
#define RESERVED2_ID_MASK 0x8000
-
+
#define OBJECT_ID_SHIFT 0x00
#define ENUM_ID_SHIFT 0x08
#define OBJECT_TYPE_SHIFT 0x0C
+
/****************************************************/
/* Graphics Object family definition */
/****************************************************/
-#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
- (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
- GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+ GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
/****************************************************/
/* GPU Object ID definition - Shared with BIOS */
/****************************************************/
-#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
/****************************************************/
/* Encoder Object ID definition - Shared with BIOS */
/****************************************************/
/*
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
-#define ENCODER_SIL170B_ENUM_ID1 0x2108
+#define ENCODER_SIL170B_ENUM_ID1 0x2108
#define ENCODER_CH7303_ENUM_ID1 0x2109
#define ENCODER_CH7301_ENUM_ID1 0x210A
#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
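For reference, the *_ENUM_ID* macros being reflowed throughout this file pack three fields into a 16-bit graphics object ID using the shifts defined above: object type at bit 12, enum (instance) ID at bit 8, object ID at bit 0. A small sketch, assuming GRAPH_OBJECT_TYPE_ENCODER is 2 (inferred from the commented historical value 0x2101 for INTERNAL_LVDS):

#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_SHIFT		0x00
#define ENUM_ID_SHIFT		0x08
#define OBJECT_TYPE_SHIFT	0x0C

static uint16_t make_object_id(uint16_t type, uint16_t enum_id,
			       uint16_t obj)
{
	return (uint16_t)(type << OBJECT_TYPE_SHIFT |
			  enum_id << ENUM_ID_SHIFT |
			  obj << OBJECT_ID_SHIFT);
}

int main(void)
{
	/* encoder type 2, enum 1, INTERNAL_LVDS (0x01) -> 0x2101 */
	printf("0x%04x\n", make_object_id(2, 1, 0x01));
	return 0;
}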
@@ -175,8 +186,8 @@
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
-#define ENCODER_SI178_ENUM_ID1 0x2117
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
+#define ENCODER_SI178_ENUM_ID1 0x2117
#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
#define ENCODER_VT1625_ENUM_ID1 0x211A
@@ -185,205 +196,169 @@
#define ENCODER_DP_DP501_ENUM_ID1 0x211D
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
*/
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_SIL170B_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7303_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7301_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_TITFP513_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1623_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1930_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
-
-#define ENCODER_SI178_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
-
-#define ENCODER_MVPU_FPGA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1625_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1932_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_DP501_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_AN9801_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
@@ -406,167 +381,253 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
*/
-#define CONNECTOR_LVDS_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_COMPOSITE_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SVIDEO_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_YPbPr_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SCART_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
-#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
/* deleted */
/****************************************************/
+/* Generic Object ID definition - Shared with BIOS */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
+
#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
@@ -575,4 +636,8 @@
#pragma pack()
#endif
-#endif /*GRAPHICTYPE */
+#endif /*GRAPHICTYPE */
+
+
+
+
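
Every macro in the connector/router/generic blocks above builds one object ID by ORing three bit-fields: an object type, an enum (instance) number, and an object ID. A minimal standalone sketch of the same packing and unpacking; the numeric shift values and the type/ID constants below are illustrative assumptions (the authoritative values live earlier in this header):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed field layout for illustration: object id in the low byte,
     * enum id above it, object type on top. */
    #define OBJECT_ID_SHIFT   0x00
    #define ENUM_ID_SHIFT     0x08
    #define OBJECT_TYPE_SHIFT 0x0C

    static uint16_t pack_object(uint8_t type, uint8_t enum_id, uint8_t obj_id)
    {
        return (uint16_t)(type << OBJECT_TYPE_SHIFT |
                          enum_id << ENUM_ID_SHIFT |
                          obj_id << OBJECT_ID_SHIFT);
    }

    int main(void)
    {
        /* Hypothetical values: type 3 for connector, id 0x13 for DP. */
        uint16_t id = pack_object(3, 1, 0x13);
        printf("object 0x%04x: type %u enum %u id 0x%02x\n", id,
               id >> OBJECT_TYPE_SHIFT,
               (id >> ENUM_ID_SHIFT) & 0xF,
               (id >> OBJECT_ID_SHIFT) & 0xFF);
        return 0;
    }

Packing the same (type, enum, id) triple always yields the same word, which is why the BIOS and the driver can share these tables by value.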
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 6578d19dff9..7f152f66f19 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#define ATOM_DEBUG
@@ -58,6 +59,7 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
+static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
@@ -211,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
- val = le32_to_cpu(ctx->ps[idx]);
+ /* get_unaligned_le32 avoids unaligned accesses from atombios
+ * tables, noticed on a DEC Alpha. */
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
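
The switch to get_unaligned_le32() matters because the parameter space handed to the parser is only byte-aligned inside the BIOS image; a plain 32-bit load through a cast pointer faults on strict-alignment machines such as the Alpha mentioned in the comment. A portable userspace model of the helper, assuming it behaves like a byte-wise little-endian load:

    #include <stdint.h>

    /* Byte-wise little-endian load: safe at any alignment and on any
     * host endianness; models the kernel's get_unaligned_le32(). */
    static uint32_t load_le32(const void *p)
    {
        const uint8_t *b = p;
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }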
@@ -245,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
val = gctx->io_attr;
break;
+ case ATOM_WS_REGPTR:
+ val = gctx->reg_block;
+ break;
default:
val = ctx->ws[idx];
}
@@ -384,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+ uint32_t val = 0xCDCDCDCD;
+
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ val = U32(*ptr);
+ (*ptr) += 4;
+ break;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ val = U16(*ptr);
+ (*ptr) += 2;
+ break;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ val = U8(*ptr);
+ (*ptr)++;
+ break;
+ }
+ return val;
+}
+
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
int *ptr, uint32_t *saved, int print)
{
@@ -481,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
gctx->io_attr = val;
break;
+ case ATOM_WS_REGPTR:
+ gctx->reg_block = val;
+ break;
default:
ctx->ws[idx] = val;
}
@@ -573,7 +609,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -607,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
uint8_t count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
- schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+ udelay(count);
else
schedule_timeout_uninterruptible(msecs_to_jiffies(count));
}
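
The delay fix is about granularity: usecs_to_jiffies() rounds any nonzero count up to at least one jiffy, so a microsecond-scale ATOM delay used to sleep for milliseconds, while udelay() busy-waits for roughly the requested time. A quick standalone check of the rounding, assuming the usual round-up conversion and an illustrative HZ:

    #include <stdio.h>

    #define HZ 250 /* assumed config value for illustration */

    /* Round-up microseconds-to-jiffies conversion, as the kernel does. */
    static unsigned long usecs_to_jiffies_model(unsigned int us)
    {
        const unsigned long us_per_jiffy = 1000000UL / HZ;
        return (us + us_per_jiffy - 1) / us_per_jiffy;
    }

    int main(void)
    {
        /* A 20 us ATOM delay used to sleep 1 jiffy = 4000 us at HZ=250. */
        printf("20 us -> %lu jiffy(ies)\n", usecs_to_jiffies_model(20));
        return 0;
    }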
@@ -676,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src1: ");
- src1 = atom_get_src(ctx, attr, ptr);
+ src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" src2: ");
src2 = atom_get_src(ctx, attr, ptr);
dst &= src1;
@@ -808,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
@@ -817,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- shift = U8((*ptr)++);
+ shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
SDEBUG(" dst: ");
@@ -833,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- shift = U8((*ptr)++);
+ shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
SDEBUG(" dst: ");
@@ -936,18 +1004,18 @@ static struct {
atom_op_or, ATOM_ARG_FB}, {
atom_op_or, ATOM_ARG_PLL}, {
atom_op_or, ATOM_ARG_MC}, {
- atom_op_shl, ATOM_ARG_REG}, {
- atom_op_shl, ATOM_ARG_PS}, {
- atom_op_shl, ATOM_ARG_WS}, {
- atom_op_shl, ATOM_ARG_FB}, {
- atom_op_shl, ATOM_ARG_PLL}, {
- atom_op_shl, ATOM_ARG_MC}, {
- atom_op_shr, ATOM_ARG_REG}, {
- atom_op_shr, ATOM_ARG_PS}, {
- atom_op_shr, ATOM_ARG_WS}, {
- atom_op_shr, ATOM_ARG_FB}, {
- atom_op_shr, ATOM_ARG_PLL}, {
- atom_op_shr, ATOM_ARG_MC}, {
+ atom_op_shift_left, ATOM_ARG_REG}, {
+ atom_op_shift_left, ATOM_ARG_PS}, {
+ atom_op_shift_left, ATOM_ARG_WS}, {
+ atom_op_shift_left, ATOM_ARG_FB}, {
+ atom_op_shift_left, ATOM_ARG_PLL}, {
+ atom_op_shift_left, ATOM_ARG_MC}, {
+ atom_op_shift_right, ATOM_ARG_REG}, {
+ atom_op_shift_right, ATOM_ARG_PS}, {
+ atom_op_shift_right, ATOM_ARG_WS}, {
+ atom_op_shift_right, ATOM_ARG_FB}, {
+ atom_op_shift_right, ATOM_ARG_PLL}, {
+ atom_op_shift_right, ATOM_ARG_MC}, {
atom_op_mul, ATOM_ARG_REG}, {
atom_op_mul, ATOM_ARG_PS}, {
atom_op_mul, ATOM_ARG_WS}, {
@@ -1040,7 +1108,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1057,8 +1125,6 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
- /* reset reg block */
- ctx->reg_block = 0;
ectx.ctx = ctx;
ectx.ps_shift = ps / 4;
ectx.start = base;
@@ -1092,6 +1158,19 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
kfree(ectx.ws);
}
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+ mutex_lock(&ctx->mutex);
+ /* reset reg block */
+ ctx->reg_block = 0;
+ /* reset fb window */
+ ctx->fb_base = 0;
+ /* reset io mode */
+ ctx->io_mode = ATOM_IO_MM;
+ atom_execute_table_locked(ctx, index, params);
+ mutex_unlock(&ctx->mutex);
+}
+
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
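
The split of atom_execute_table() into a public locking wrapper and a _locked worker serves two purposes: external callers are serialized on the new context mutex and always start from clean interpreter state (reg block, fb window, io mode), while nested CALL_TABLE opcodes recurse through the _locked variant and never try to take the non-recursive mutex twice. A pthread sketch of the same shape, with hypothetical names:

    #include <pthread.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void run_table_locked(int index)
    {
        /* ... interpret opcodes; a nested call-table opcode re-enters
         * run_table_locked() directly, never the locking wrapper ... */
    }

    /* The only entry point for external callers: takes the lock once and
     * resets shared interpreter state before running. */
    void run_table(int index)
    {
        pthread_mutex_lock(&table_lock);
        /* reset shared interpreter state here (reg block, fb window, ...) */
        run_table_locked(index);
        pthread_mutex_unlock(&table_lock);
    }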
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 6671848e5ea..bc73781423a 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
+#define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
@@ -120,6 +121,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
+ struct mutex mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5f48515c77a..91ad0d1c1b1 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO_V3;
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; // the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PP_PLATFORM_CAP_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+// remaining 3 bits are reserved
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ UCHAR ucUnused1[3];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width may be bigger due to display BW requirements.
+ USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
/**************************************************************************/
/* Following definitions are for compatibility issues in different SW components. */
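
The clock fields in the new PPLib structures split a 24-bit value across a USHORT low part and a UCHAR high part (e.g. usEngineClockLow/ucEngineClockHigh). A small helper showing the reassembly; treating the result as 10 kHz units is the usual ATOM convention but is an assumption that should be checked against the consuming code:

    #include <stdint.h>

    /* Reassemble a PPLib 24-bit clock from its split fields. The low
     * word must already be converted from little-endian (le16_to_cpu()
     * in the kernel). */
    static uint32_t pplib_clock(uint16_t low, uint8_t high)
    {
        return ((uint32_t)high << 16) | low;
    }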
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00..af464e351fb 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- printk("executing set crtc dtd timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- printk("executing set crtc timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
}
}
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+union adjust_pixel_clock {
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct radeon_pll *pll)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
- uint8_t frev, crev;
- int index;
- SET_PIXEL_CLOCK_PS_ALLOCATION args;
- PIXEL_CLOCK_PARAMETERS *spc1_ptr;
- PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
- PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
- uint32_t pll_clock = mode->clock;
- uint32_t adjusted_clock;
- uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
- struct radeon_pll *pll;
- int pll_flags = 0;
+ u32 adjusted_clock = mode->clock;
- memset(&args, 0, sizeof(args));
+ /* reset the pll flags */
+ pll->flags = 0;
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
- RADEON_PLL_PREFER_CLOSEST_LOWER);
+ pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+ RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
} else {
- pll_flags |= RADEON_PLL_LEGACY;
+ pll->flags |= RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
- if (!ASIC_IS_AVIVO(rdev)) {
- if (encoder->encoder_type !=
- DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS)
- pll_flags |= RADEON_PLL_USE_REF_DIV;
- }
radeon_encoder = to_radeon_encoder(encoder);
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+ adjusted_clock = mode->clock * 2;
+ } else {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ }
break;
}
}
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
* special hw requirements.
*/
if (ASIC_IS_DCE3(rdev)) {
- ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
+ union adjust_pixel_clock args;
+ struct radeon_encoder_atom_dig *dig;
+ u8 frev, crev;
+ int index;
- if (!encoder)
- return;
-
- memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
- adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
- adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
- adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ if (!radeon_encoder->enc_priv)
+ return adjusted_clock;
+ dig = radeon_encoder->enc_priv;
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
- atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&adjust_pll_args);
- adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
- } else {
- /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
- if (ASIC_IS_AVIVO(rdev) &&
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
- adjusted_clock = mode->clock * 2;
- else
- adjusted_clock = mode->clock;
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ memset(&args, 0, sizeof(args));
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
}
+ return adjusted_clock;
+}
+
+union set_pixel_clock {
+ SET_PIXEL_CLOCK_PS_ALLOCATION base;
+ PIXEL_CLOCK_PARAMETERS v1;
+ PIXEL_CLOCK_PARAMETERS_V2 v2;
+ PIXEL_CLOCK_PARAMETERS_V3 v3;
+};
+
+void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ u32 adjusted_clock;
+
+ memset(&args, 0, sizeof(args));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
+ /* adjust pixel clock as needed */
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll)
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
else
radeon_compute_pll(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
} else
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
case 1:
switch (crev) {
case 1:
- spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
- spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc1_ptr->ucFracFbDiv = frac_fb_div;
- spc1_ptr->ucPostDiv = post_div;
- spc1_ptr->ucPpll =
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.usRefDiv = cpu_to_le16(ref_div);
+ args.v1.usFbDiv = cpu_to_le16(fb_div);
+ args.v1.ucFracFbDiv = frac_fb_div;
+ args.v1.ucPostDiv = post_div;
+ args.v1.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
- spc1_ptr->ucRefDivSrc = 1;
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ args.v1.ucRefDivSrc = 1;
break;
case 2:
- spc2_ptr =
- (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
- spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc2_ptr->ucFracFbDiv = frac_fb_div;
- spc2_ptr->ucPostDiv = post_div;
- spc2_ptr->ucPpll =
+ args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v2.usRefDiv = cpu_to_le16(ref_div);
+ args.v2.usFbDiv = cpu_to_le16(fb_div);
+ args.v2.ucFracFbDiv = frac_fb_div;
+ args.v2.ucPostDiv = post_div;
+ args.v2.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
- spc2_ptr->ucRefDivSrc = 1;
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucRefDivSrc = 1;
break;
case 3:
- if (!encoder)
- return;
- spc3_ptr =
- (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
- spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc3_ptr->ucFracFbDiv = frac_fb_div;
- spc3_ptr->ucPostDiv = post_div;
- spc3_ptr->ucPpll =
+ args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.usRefDiv = cpu_to_le16(ref_div);
+ args.v3.usFbDiv = cpu_to_le16(fb_div);
+ args.v3.ucFracFbDiv = frac_fb_div;
+ args.v3.ucPostDiv = post_div;
+ args.v3.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
- spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
- spc3_ptr->ucEncoderMode =
+ args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucTransmitterId = radeon_encoder->encoder_id;
+ args.v3.ucEncoderMode =
atombios_get_encoder_mode(encoder);
break;
default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
return;
}
- printk("executing set pll\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_set_base(crtc, x, y, old_fb);
+ else
+ return radeon_crtc_set_base(crtc, x, y, old_fb);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ u32 disp_merge_cntl;
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+ break;
+ case 1:
+ disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+ break;
+ }
+}
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
else {
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- radeon_crtc_set_base(crtc, x, y, old_fb);
- radeon_legacy_atom_set_surface(crtc);
+ atombios_crtc_set_base(crtc, x, y, old_fb);
+ radeon_legacy_atom_fixup(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
atombios_lock_crtc(crtc, 1);
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
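
The new adjust_pixel_clock and set_pixel_clock unions replace casts of a single allocation struct: the caller reads the table's format/content revision with atom_parse_cmd_header() and fills exactly one union member, so layout changes across BIOS revisions stay contained in the version switch. A stripped-down model of the pattern, with invented v1/v2 layouts:

    #include <stdint.h>
    #include <stdio.h>

    struct params_v1 { uint16_t pixel_clock; uint8_t transmitter; };
    struct params_v2 { uint16_t pixel_clock; uint8_t transmitter; uint8_t encode_mode; };

    union set_clock_args {
        struct params_v1 v1;
        struct params_v2 v2;
    };

    static int fill_args(union set_clock_args *args, int frev, int crev,
                         uint16_t clk)
    {
        switch (frev) {
        case 1:
            switch (crev) {
            case 1: args->v1.pixel_clock = clk; return 0;
            case 2: args->v2.pixel_clock = clk;
                    args->v2.encode_mode = 0; return 0;
            }
            /* unmatched crev falls through to the error path */
        default:
            fprintf(stderr, "Unknown table version %d %d\n", frev, crev);
            return -1;
        }
    }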
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0d63c4436e7..99915a682d5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
+ int retry_count = 0;
memset(&args, 0, sizeof(args));
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+retry:
memcpy(base, req_bytes, num_bytes);
args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus) {
- DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ if (args.ucReplyStatus && !args.ucDataOutLen) {
+ if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ goto retry;
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus);
+ chan->rec.i2c_id, args.ucReplyStatus, retry_count);
return false;
}
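
The AUX change above retries only when the reply status is 0x20 (which the driver treats as retryable) and no data came back, capping at 10 attempts so a dead sink cannot wedge the loop. The shape of the pattern in isolation, with a hypothetical transfer function standing in for the atombios table execution:

    #include <stdbool.h>

    #define AUX_STATUS_RETRY 0x20 /* the status value used above */

    /* try_aux() is a hypothetical stand-in for executing the AUX table. */
    extern int try_aux(void *req, unsigned char *status,
                       unsigned char *out_len);

    static bool aux_transaction(void *req)
    {
        unsigned char status, out_len;
        int retry;

        for (retry = 0; retry < 10; retry++) {
            try_aux(req, &status, &out_len);
            if (!status || out_len)     /* success, or data despite status */
                return true;
            if (status != AUX_STATUS_RETRY)
                break;                  /* hard failure: don't retry */
        }
        return false;
    }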
@@ -468,7 +472,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
radeon_connector = to_radeon_connector(connector);
@@ -582,7 +587,8 @@ void dp_link_train(struct drm_encoder *encoder,
u8 train_set[4];
int i;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
if (!radeon_encoder->enc_priv)
@@ -594,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
return;
dig_connector = radeon_connector->con_priv;
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
- enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
- else
- enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig_connector->linkb)
- enc_id |= ATOM_DP_CONFIG_LINK_B;
- else
- enc_id |= ATOM_DP_CONFIG_LINK_A;
- } else {
- if (dig_connector->linkb)
- enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
- else
- enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
- }
+ if (dig->dig_encoder)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_LINK_A;
memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
if (dig_connector->dp_clock == 270000)
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 0d79577c157..607241c6a8a 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
fseek(file, 0, SEEK_SET);
/* get header */
- if (fgets(buf, 1024, file) == NULL)
+ if (fgets(buf, 1024, file) == NULL) {
+ fclose(file);
return -1;
+ }
/* first line will contain the last register
* and gpu name */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 824cc6480a0..c0d4650cdb7 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev)
break;
}
}
- r100_irq_set(rdev);
+ if (rdev->irq.installed)
+ r100_irq_set(rdev);
}
void r100_hpd_fini(struct radeon_device *rdev)
@@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
@@ -348,14 +354,25 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- /* Who ever call radeon_fence_emit should call ring_lock and ask
- * for enough space (today caller are ib schedule and buffer move) */
+ /* We have to make sure that caches are flushed before
+ * the CPU might read something from VRAM. */
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
@@ -1374,7 +1391,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_ARGB4444:
case RADEON_TXFORMAT_VYUY422:
case RADEON_TXFORMAT_YVYU422:
- case RADEON_TXFORMAT_DXT1:
case RADEON_TXFORMAT_SHADOW16:
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
@@ -1382,12 +1398,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
- case RADEON_TXFORMAT_DXT23:
- case RADEON_TXFORMAT_DXT45:
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
break;
+ case RADEON_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case RADEON_TXFORMAT_DXT23:
+ case RADEON_TXFORMAT_DXT45:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@@ -1487,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
+ track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track);
@@ -1707,14 +1731,6 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
-void r100_hdp_flush(struct radeon_device *rdev)
-{
- u32 tmp;
- tmp = RREG32(RADEON_HOST_PATH_CNTL);
- tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
-}
-
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -2731,6 +2747,7 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+ DRM_ERROR("compress format %d\n", t->compress_format);
}
static int r100_cs_track_cube(struct radeon_device *rdev,
@@ -2760,6 +2777,36 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
return 0;
}
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+ int block_width, block_height, block_bytes;
+ int wblocks, hblocks;
+ int min_wblocks;
+ int sz;
+
+ block_width = 4;
+ block_height = 4;
+
+ switch (compress_format) {
+ case R100_TRACK_COMP_DXT1:
+ block_bytes = 8;
+ min_wblocks = 4;
+ break;
+ default:
+ case R100_TRACK_COMP_DXT35:
+ block_bytes = 16;
+ min_wblocks = 2;
+ break;
+ }
+
+ hblocks = (h + block_height - 1) / block_height;
+ wblocks = (w + block_width - 1) / block_width;
+ if (wblocks < min_wblocks)
+ wblocks = min_wblocks;
+ sz = wblocks * hblocks * block_bytes;
+ return sz;
+}
+
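
Worked numbers for r100_track_compress_size(): DXT formats store 4x4 texel blocks, 8 bytes per block for DXT1 and 16 for DXT3/5, and the width is clamped to a minimum block count. So a 16x16 DXT1 mipmap costs (16/4)*(16/4)*8 = 128 bytes, while a 1x1 DXT1 level still costs 4*1*8 = 32 bytes because of the 4-block minimum width. A usage sketch against the function above:

    static void compress_size_examples(void)
    {
        /* Expected results given the block math above:
         * 16x16 DXT1 -> 4*4*8  = 128 bytes
         * 16x16 DXT3 -> 4*4*16 = 256 bytes (min_wblocks=2 has no effect)
         * 1x1   DXT1 -> 4*1*8  =  32 bytes (width clamped to 4 blocks) */
        int a = r100_track_compress_size(R100_TRACK_COMP_DXT1, 16, 16);
        int b = r100_track_compress_size(R100_TRACK_COMP_DXT35, 16, 16);
        int c = r100_track_compress_size(R100_TRACK_COMP_DXT1, 1, 1);
        (void)a; (void)b; (void)c;
    }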
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
@@ -2797,9 +2844,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
- size += w * h;
+ if (track->textures[u].compress_format) {
+ /* compressed textures are block based */
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h);
+ } else
+ size += w * h;
}
size *= track->textures[u].cpp;
+
switch (track->textures[u].tex_coord_type) {
case 0:
break;
@@ -2838,6 +2891,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_cb; i++) {
if (track->cb[i].robj == NULL) {
+ if (!(track->fastfill || track->color_channel_mask ||
+ track->blend_read_enable)) {
+ continue;
+ }
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
@@ -2967,6 +3024,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
@@ -3265,6 +3323,7 @@ static int r100_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -3316,13 +3375,13 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
@@ -3346,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
@@ -3399,6 +3456,8 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -3427,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index ca50903dd2b..b27a6999d21 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -28,6 +28,10 @@ struct r100_cs_cube_info {
unsigned height;
};
+#define R100_TRACK_COMP_NONE 0
+#define R100_TRACK_COMP_DXT1 1
+#define R100_TRACK_COMP_DXT35 2
+
struct r100_cs_track_texture {
struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
@@ -44,6 +48,7 @@ struct r100_cs_track_texture {
bool enabled;
bool roundup_w;
bool roundup_h;
+ unsigned compress_format;
};
struct r100_cs_track_limits {
@@ -62,13 +67,15 @@ struct r100_cs_track {
unsigned immd_dwords;
unsigned num_arrays;
unsigned max_indx;
+ unsigned color_channel_mask;
struct r100_cs_track_array arrays[11];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
-
+ bool fastfill;
+ bool blend_read_enable;
};
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eb740fc3549..ff1e0cd608b 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case 5:
case 6:
case 7:
+ /* 1D/2D */
track->textures[i].tex_coord_type = 0;
break;
case 1:
- track->textures[i].tex_coord_type = 1;
+ /* CUBE */
+ track->textures[i].tex_coord_type = 2;
break;
case 2:
- track->textures[i].tex_coord_type = 2;
+ /* 3D */
+ track->textures[i].tex_coord_type = 1;
break;
}
break;
@@ -401,7 +404,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
break;
- case R200_TXFORMAT_DXT1:
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
case R200_TXFORMAT_RGB565:
@@ -418,9 +420,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_ABGR8888:
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
+ track->textures[i].cpp = 4;
+ break;
+ case R200_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
case R200_TXFORMAT_DXT23:
case R200_TXFORMAT_DXT45:
- track->textures[i].cpp = 4;
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 83378c39d0e..43b55a030b4 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
#include "rv350d.h"
#include "r300_reg_safe.h"
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
+ * via MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
+ * However, scheduling such a write to the ring seems harmless; I suspect
+ * the CP read collides with the flush somehow, or maybe the MC, hard to
+ * tell. (Jerome Glisse)
+ */
/*
* rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
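
Per the errata note above, the fence path now flushes the HDP read cache by emitting the HOST_PATH_CNTL write through the CP ring instead of via MMIO. A model of a type-0 CP packet, assuming the usual radeon encoding (register dword index in the low bits, extra-dword count in bits 29:16):

    #include <stdint.h>

    /* Type-0 packet header: write (n+1) dwords starting at register
     * 'reg'. Encoding assumed from the radeon register headers. */
    static uint32_t packet0(uint32_t reg, uint32_t n)
    {
        return (reg >> 2) | (n << 16);
    }

    /* Emit "write val to reg" into a software ring buffer. */
    static void ring_write_reg(uint32_t *ring, unsigned *wptr,
                               uint32_t reg, uint32_t val)
    {
        ring[(*wptr)++] = packet0(reg, 0);
        ring[(*wptr)++] = val;
    }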
@@ -493,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
+
tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
+ tmp &= R300_MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0: rdev->mc.vram_width = 64; break;
+ case 1: rdev->mc.vram_width = 128; break;
+ case 2: rdev->mc.vram_width = 256; break;
+ default: rdev->mc.vram_width = 128; break;
}
r100_vram_init_sizes(rdev);
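
The old code treated R300_MEM_NUM_CHANNELS_MASK as a boolean, so 256-bit boards were misreported as 128-bit; the field is really a two-bit channel-count encoding. A standalone decode of the same logic (the 0x3 mask is an assumption; see the register headers for the real mask):

    /* Decode the memory channel field into a bus width in bits.
     * 0 -> 64, 1 -> 128, 2 -> 256; anything else falls back to 128. */
    static int vram_width_bits(unsigned mem_cntl)
    {
        switch (mem_cntl & 0x3) { /* assumed channel mask */
        case 0: return 64;
        case 1: return 128;
        case 2: return 256;
        default: return 128;
        }
    }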
@@ -686,7 +702,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
break;
/* Tracked registers */
@@ -852,7 +876,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Z6Y5X5:
case R300_TX_FORMAT_W4Z4Y4X4:
case R300_TX_FORMAT_W1Z5Y5X5:
- case R300_TX_FORMAT_DXT1:
case R300_TX_FORMAT_D3DMFT_CxV8U8:
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
@@ -866,8 +889,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x17:
case R300_TX_FORMAT_FL_I32:
case 0x1e:
- case R300_TX_FORMAT_DXT3:
- case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 4;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
@@ -878,6 +899,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
break;
+ case R300_TX_FORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case R300_TX_FORMAT_ATI2N:
+ if (p->rdev->family < CHIP_R420) {
+ DRM_ERROR("Invalid texture format %u\n",
+ (idx_value & 0x1F));
+ return -EINVAL;
+ }
+ /* The same rules apply as for DXT3/5. */
+ /* Pass through. */
+ case R300_TX_FORMAT_DXT3:
+ case R300_TX_FORMAT_DXT5:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
default:
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
@@ -937,6 +975,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].width_11 = tmp;
tmp = ((idx_value >> 16) & 1) << 11;
track->textures[i].height_11 = tmp;
+
+ /* ATI1N */
+ if (idx_value & (1 << 14)) {
+ /* The same rules apply as for DXT1. */
+ track->textures[i].compress_format =
+ R100_TRACK_COMP_DXT1;
+ }
+ } else if (idx_value & (1 << 14)) {
+ DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ return -EINVAL;
}
break;
case 0x4480:
@@ -978,6 +1026,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
+ case 0x4e0c:
+ /* RB3D_COLOR_CHANNEL_MASK */
+ track->color_channel_mask = idx_value;
+ break;
+ case 0x4d1c:
+ /* ZB_BW_CNTL */
+ track->fastfill = !!(idx_value & (1 << 2));
+ break;
+ case 0x4e04:
+ /* RB3D_BLENDCNTL */
+ track->blend_read_enable = !!(idx_value & (1 << 2));
+ break;
case 0x4be8:
/* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530)
@@ -1214,6 +1274,7 @@ static int r300_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -1269,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1278,6 +1338,7 @@ void r300_fini(struct radeon_device *rdev)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
@@ -1324,6 +1385,8 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -1357,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
+ radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index cb2e470f97d..34bffa0e4b7 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
int sz;
int addr;
int type;
- int clamp;
+ int isclamp;
int stride;
RING_LOCALS;
@@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
- clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+ isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
addr |= (type << 16);
- addr |= (clamp << 17);
+ addr |= (isclamp << 17);
stride = type ? 4 : 6;
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 4b7afef35a6..1735a2b6958 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -900,6 +900,7 @@
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
+# define R300_TX_FORMAT_ATI2N 0x1F
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c05a7270cf0..d9373246c97 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,7 +30,15 @@
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
+#include "r100d.h"
#include "r420d.h"
+#include "r420_reg_safe.h"
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
int r420_mc_init(struct radeon_device *rdev)
{
@@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
@@ -165,6 +171,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+ /* RV410 and R420 can lock up if CP DMA to host memory happens
+ * while the 2D engine is busy.
+ *
+ * The proper workaround is to queue a RESYNC at the beginning
+ * of the CP init, apparently.
+ */
+ radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+ /* Catch the RESYNC we dispatched all the way back,
+ * at the very beginning of the CP init.
+ */
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev);
+ radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
static int r420_startup(struct radeon_device *rdev)
{
int r;
@@ -190,12 +224,14 @@ static int r420_startup(struct radeon_device *rdev)
r420_pipes_init(rdev);
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
+ r420_cp_errata_init(rdev);
r = r100_wb_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
@@ -238,6 +274,7 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
+ r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -346,22 +383,21 @@ int r420_init(struct radeon_device *rdev)
if (r)
return r;
}
- r300_set_reg_safe(rdev);
+ r420_set_reg_safe(rdev);
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r420_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0f3843b6dac..ddf5731eba0 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -293,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 36656bd110b..2ffcf5a0355 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
}
}
}
- r600_irq_set(rdev);
+ if (rdev->irq.installed)
+ r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
@@ -623,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
@@ -666,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -726,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
return 0;
}
@@ -1384,11 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
-void r600_hdp_flush(struct radeon_device *rdev)
-{
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
/*
* CP & Ring
*/
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.align_mask = 16 - 1;
}
+void r600_cp_fini(struct radeon_device *rdev)
+{
+ r600_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
+
/*
* GPU scratch registers helpers function.
@@ -1785,28 +1787,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
}
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
- r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
@@ -1862,18 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ r = r600_blit_init(rdev);
if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+ /* pin copy shader into vram */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
}
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio resume failed\n");
+ return r;
+ }
+
return r;
}
@@ -1945,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
{
int r;
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_irq_suspend(rdev);
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ }
return 0;
}
@@ -2016,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
r = r600_mc_init(rdev);
if (r)
return r;
@@ -2038,52 +2066,50 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
-
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
- r600_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
+
+ r = r600_audio_init(rdev);
+ if (r)
+ return r; /* TODO error handling */
return 0;
}
void r600_fini(struct radeon_device *rdev)
{
- /* Suspend operations */
- r600_suspend(rdev);
-
+ r600_audio_fini(rdev);
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- if (rdev->flags & RADEON_IS_AGP)
- radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2189,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
- rdev->ih.align_mask = 4 - 1;
+ rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+ rdev->ih.rptr = 0;
}
-static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
int r;
- rdev->ih.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2226,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
return r;
}
}
- rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
- rdev->ih.rptr = 0;
-
return 0;
}
@@ -2378,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
/* allocate ring */
- ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ ret = r600_ih_ring_alloc(rdev);
if (ret)
return ret;
@@ -2441,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
return ret;
}
-void r600_irq_fini(struct radeon_device *rdev)
+void r600_irq_suspend(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_irq_suspend(rdev);
r600_ih_ring_fini(rdev);
}
@@ -2454,9 +2482,17 @@ int r600_irq_set(struct radeon_device *rdev)
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
/* don't enable anything if the ih is disabled */
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
return 0;
+ }
if (ASIC_IS_DCE3(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2626,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
- WARN_ON(1);
- /* XXX deal with overflow */
- DRM_ERROR("IH RB overflow\n");
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last not-overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catch up.
+		 */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
}
- wptr = wptr & WPTR_OFFSET_MASK;
-
- return wptr;
+ return (wptr & rdev->ih.ptr_mask);
}
/* r600 IV Ring
@@ -2671,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 last_entry = rdev->ih.ring_size - 16;
u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
unsigned long flags;
bool queue_hotplug = false;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
spin_lock_irqsave(&rdev->ih.lock, flags);
@@ -2717,7 +2756,7 @@ restart_ih:
}
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
@@ -2737,7 +2776,7 @@ restart_ih:
}
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
@@ -2786,7 +2825,7 @@ restart_ih:
}
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
@@ -2800,15 +2839,13 @@ restart_ih:
DRM_DEBUG("IH: CP EOP\n");
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
/* wptr/rptr are in bytes! */
- if (rptr == last_entry)
- rptr = 0;
- else
- rptr += 16;
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
}
/* make sure wptr hasn't changed while processing */
wptr = r600_get_ih_wptr(rdev);
@@ -2876,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
return 0;
#endif
}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX hardware doesn't seem to take into account an HDP flush
+ * performed through the ring buffer; this leads to corruption in rendering
+ * (see http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid this we
+ * directly perform the HDP flush by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 00000000000..0dcb6904c4f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_reg.h"
+#include "atom.h"
+
+#define AUDIO_TIMER_INTERVAL 100 /* 1/10 second should be enough */
+
+/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
+ || rdev->family == CHIP_RS600
+ || rdev->family == CHIP_RS690
+ || rdev->family == CHIP_RS740;
+}
+
+/*
+ * current number of channels
+ */
+static int r600_audio_channels(struct radeon_device *rdev)
+{
+ return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
+}
+
+/*
+ * current bits per sample
+ */
+static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+{
+ uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
+ switch (value) {
+ case 0x0: return 8;
+ case 0x1: return 16;
+ case 0x2: return 20;
+ case 0x3: return 24;
+ case 0x4: return 32;
+ }
+
+ DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
+
+ return 16;
+}
+
+/*
+ * current sampling rate in Hz
+ */
+static int r600_audio_rate(struct radeon_device *rdev)
+{
+ uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+ uint32_t result;
+
+ if (value & 0x4000)
+ result = 44100;
+ else
+ result = 48000;
+
+ result *= ((value >> 11) & 0x7) + 1;
+ result /= ((value >> 8) & 0x7) + 1;
+
+ return result;
+}
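
Under the field layout assumed by the decode above, a worked example (the register semantics are reverse engineered here, so treat the reading as provisional):

/* value = 0x4900
 *   bit 14 set      -> base rate 44100 Hz
 *   bits 13:11 = 1  -> multiplier 1 + 1 = 2
 *   bits 10:8  = 1  -> divider    1 + 1 = 2
 * rate = 44100 * 2 / 2 = 44100 Hz
 */
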
+
+/*
+ * iec 60958 status bits
+ */
+static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+{
+ return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
+}
+
+/*
+ * iec 60958 category code
+ */
+static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+{
+ return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
+}
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+static void r600_audio_update_hdmi(unsigned long param)
+{
+ struct radeon_device *rdev = (struct radeon_device *)param;
+ struct drm_device *dev = rdev->ddev;
+
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
+ struct drm_encoder *encoder;
+ int changes = 0;
+
+ changes |= channels != rdev->audio_channels;
+ changes |= rate != rdev->audio_rate;
+ changes |= bps != rdev->audio_bits_per_sample;
+ changes |= status_bits != rdev->audio_status_bits;
+ changes |= category_code != rdev->audio_category_code;
+
+ if (changes) {
+ rdev->audio_channels = channels;
+ rdev->audio_rate = rate;
+ rdev->audio_bits_per_sample = bps;
+ rdev->audio_status_bits = status_bits;
+ rdev->audio_category_code = category_code;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (changes || r600_hdmi_buffer_status_changed(encoder))
+ r600_hdmi_update_audio_settings(
+ encoder, channels,
+ rate, bps, status_bits,
+ category_code);
+ }
+
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVAL));
+}
+
+/*
+ * initialize the audio vars and register the update timer
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+ if (!r600_audio_chipset_supported(rdev))
+ return 0;
+
+ DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
+ WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+
+ rdev->audio_channels = -1;
+ rdev->audio_rate = -1;
+ rdev->audio_bits_per_sample = -1;
+ rdev->audio_status_bits = 0;
+ rdev->audio_category_code = 0;
+
+ setup_timer(
+ &rdev->audio_timer,
+ r600_audio_update_hdmi,
+ (unsigned long)rdev);
+
+ mod_timer(&rdev->audio_timer, jiffies + 1);
+
+ return 0;
+}
+
+/*
+ * determine how the encoders and the audio interface are wired together
+ */
+int r600_audio_tmds_index(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *other;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ return 0;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ /* special case: check if a TMDS1 is present */
+ list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
+ if (to_radeon_encoder(other)->encoder_id ==
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1)
+ return 1;
+ }
+ return 0;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ return 1;
+
+ default:
+ DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return -1;
+ }
+}
+
+/*
+ * attach the audio codec to the clock source of the encoder
+ */
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int base_rate = 48000;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
+ break;
+
+ default:
+ DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
+ }
+
+ switch (r600_audio_tmds_index(encoder)) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ }
+}
+
+/*
+ * release the audio timer
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+ if (!r600_audio_chipset_supported(rdev))
+ return;
+
+ del_timer(&rdev->audio_timer);
+ WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aecafb51b6..446b765ac72 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev)
{
int r;
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0)) {
- dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
- goto out_unref;
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-out_unref:
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
@@ -540,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
- mutex_unlock(&rdev->ib_pool.mutex);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
@@ -555,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
int dwords_per_loop = 76, num_loops;
r = r600_vb_ib_get(rdev);
- WARN_ON(r);
+ if (r)
+ return r;
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -577,11 +578,12 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 5; /* fence emit for VB IB */
+ ring_size += 7; /* fence emit for VB IB (fence emit grew 2 dwords for the ring HDP flush) */
ring_size += 5; /* done copy */
- ring_size += 5; /* fence emit for done copy */
+ ring_size += 7; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
- WARN_ON(r);
+ if (r)
+ return r;
set_default_state(rdev); /* 14 */
set_shaders(rdev); /* 26 */
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e9..75bcf35a093 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ dev_priv->r600_max_backends,
+ (0xff << dev_priv->r600_max_backends) & 0xff);
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0d820764f34..e4c45ec1650 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+struct r600_cs_track {
+ u32 cb_color0_base_last;
+};
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -170,13 +174,35 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
idx, relocs_chunk->length_dw);
return -EINVAL;
}
- *cs_reloc = &p->relocs[0];
+ *cs_reloc = p->relocs;
(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
return 0;
}
/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check next packet is relocation packet3, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
* r600_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_cs_reloc *reloc;
+ struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
+ track = (struct r600_cs_track *)p->track;
ib = p->ib->ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
switch (reg) {
+			/* These registers were added late; there is userspace
+			 * which does provide relocations for them but sets a
+			 * 0 offset. In order to avoid breaking old userspace
+			 * we detect this and set the address to point to the
+			 * last CB_COLOR0_BASE. Note that if userspace doesn't
+			 * set CB_COLOR0_BASE before these registers we will
+			 * report an error. Old userspace always set
+			 * CB_COLOR0_BASE before any of these.
+			 */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color0_base_last) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] = track->cb_color0_base_last;
+ printk_once(KERN_WARNING "radeon: You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case DB_DEPTH_BASE:
case DB_HTILE_DATA_BASE:
case CB_COLOR0_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color0_base_last = ib[idx+1+i];
+ break;
case CB_COLOR1_BASE:
case CB_COLOR2_BASE:
case CB_COLOR3_BASE:
@@ -678,8 +757,13 @@
int r600_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
+ struct r600_cs_track *track;
int r;
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ p->track = track;
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
@@ -717,7 +799,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
if (p->chunk_relocs_idx == -1) {
return 0;
}
- p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
+ parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 00000000000..fcc949df0e5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+ RGB = 0,
+ YCC_422 = 1,
+ YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+ AUDIO_STATUS_DIG_ENABLE = 0x01,
+ AUDIO_STATUS_V = 0x02,
+ AUDIO_STATUS_VCFG = 0x04,
+ AUDIO_STATUS_EMPHASIS = 0x08,
+ AUDIO_STATUS_COPYRIGHT = 0x10,
+ AUDIO_STATUS_NONAUDIO = 0x20,
+ AUDIO_STATUS_PROFESSIONAL = 0x40,
+ AUDIO_STATUS_LEVEL = 0x80
+};
+
+static const struct {
+ uint32_t Clock;
+
+ int N_32kHz;
+ int CTS_32kHz;
+
+ int N_44_1kHz;
+ int CTS_44_1kHz;
+
+ int N_48kHz;
+ int CTS_48kHz;
+
+} r600_hdmi_ACR[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+ { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+ { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+};
+
+/*
+ * calculate CTS value if it's not found in the table
+ */
+static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+{
+ if (*CTS == 0)
+ *CTS = clock*N/(128*freq)*1000;
+ DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+ N, *CTS, freq);
+}
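
For reference, HDMI audio clock regeneration wants 128 * fs = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs). A worked check against the table above, and a note on why the fallback is only a fallback:

/* For 25.20 MHz at 32 kHz:
 *   25200000 * 4096 / (128 * 32000) = 25200   (matches the table row).
 * The fallback above works in kHz with integer division:
 *   25200 * 4096 / 4096000 = 25, then * 1000 = 25000,
 * so it can round down -- presumably why the common pixel clocks are
 * tabulated exactly and the formula only fills entries whose CTS is 0.
 */
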
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int CTS;
+ int N;
+ int i;
+
+ for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
+
+ CTS = r600_hdmi_ACR[i].CTS_32kHz;
+ N = r600_hdmi_ACR[i].N_32kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
+ WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_32kHz_N, N);
+
+ CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
+ N = r600_hdmi_ACR[i].N_44_1kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
+ WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_44_1kHz_N, N);
+
+ CTS = r600_hdmi_ACR[i].CTS_48kHz;
+ N = r600_hdmi_ACR[i].N_48kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
+ WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_48kHz_N, N);
+}
+
+/*
+ * calculate the checksum for a given info frame
+ */
+static void r600_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
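
The checksum above is chosen so that the header bytes plus all payload bytes sum to zero mod 256. A self-contained verifier, hypothetical and not part of the patch:

static int infoframe_checksum_ok(uint8_t type, uint8_t version,
				 uint8_t length, const uint8_t *frame)
{
	unsigned int sum = type + version + length;
	int i;

	/* frame[0] holds the checksum written by the function above */
	for (i = 0; i <= length; i++)
		sum += frame[i];
	return (sum & 0xff) == 0;
}
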
+
+/*
+ * build an HDMI Video Info Frame
+ */
+static void r600_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ enum r600_hdmi_color_format color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * build an Audio Info Frame
+ */
+static void r600_hdmi_audioinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t channel_count,
+ uint8_t coding_type,
+ uint8_t sample_size,
+ uint8_t sample_frequency,
+ uint8_t format,
+ uint8_t channel_allocation,
+ uint8_t level_shift,
+ int downmix_inhibit
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint8_t frame[11];
+
+ frame[0x0] = 0;
+ frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
+ frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
+ frame[0x3] = format;
+ frame[0x4] = channel_allocation;
+ frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
+ frame[0x6] = 0;
+ frame[0x7] = 0;
+ frame[0x8] = 0;
+ frame[0x9] = 0;
+ frame[0xA] = 0;
+
+ r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+
+ WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+}
+
+/*
+ * test if audio buffer is filled enough to start playing
+ */
+static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+}
+
+/*
+ * has the buffer status changed since the last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int status, result;
+
+ if (!radeon_encoder->hdmi_offset)
+ return 0;
+
+ status = r600_hdmi_is_audio_buffer_filled(encoder);
+ result = radeon_encoder->hdmi_buffer_status != status;
+ radeon_encoder->hdmi_buffer_status = status;
+
+ return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset = radeon_encoder->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ if (r600_hdmi_is_audio_buffer_filled(encoder)) {
+ /* disable the audio workaround and start delivery of audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+
+ } else if (radeon_encoder->hdmi_audio_workaround) {
+ /* enable the audio workaround and start delivery of audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+
+ } else {
+ /* disable the audio workaround and stop delivery of audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ }
+}
+
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
+ WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
+ WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+
+ WREG32(offset+R600_HDMI_VERSION, 0x202);
+
+ r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* audio packets per line, does anyone know how to calculate this? */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
+
+ /* update? reset? don't really know */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
+}
+
+/*
+ * update settings with current parameters from audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+ int channels,
+ int rate,
+ int bps,
+ uint8_t status_bits,
+ uint8_t category_code)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint32_t iec;
+
+ if (!offset)
+ return;
+
+ DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+ r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+ channels, rate, bps);
+ DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+ (int)status_bits, (int)category_code);
+
+ iec = 0;
+ if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+ iec |= 1 << 0;
+ if (status_bits & AUDIO_STATUS_NONAUDIO)
+ iec |= 1 << 1;
+ if (status_bits & AUDIO_STATUS_COPYRIGHT)
+ iec |= 1 << 2;
+ if (status_bits & AUDIO_STATUS_EMPHASIS)
+ iec |= 1 << 3;
+
+ iec |= category_code << 8;
+
+ switch (rate) {
+ case 32000: iec |= 0x3 << 24; break;
+ case 44100: iec |= 0x0 << 24; break;
+ case 88200: iec |= 0x8 << 24; break;
+ case 176400: iec |= 0xc << 24; break;
+ case 48000: iec |= 0x2 << 24; break;
+ case 96000: iec |= 0xa << 24; break;
+ case 192000: iec |= 0xe << 24; break;
+ }
+
+ WREG32(offset+R600_HDMI_IEC60958_1, iec);
+
+ iec = 0;
+ switch (bps) {
+ case 16: iec |= 0x2; break;
+ case 20: iec |= 0x3; break;
+ case 24: iec |= 0xb; break;
+ }
+ if (status_bits & AUDIO_STATUS_V)
+ iec |= 0x5 << 16;
+
+ WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
+
+ /* 0x021 or 0x031 sets the audio frame length */
+ WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
+ r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* update? reset? don't really know */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
+}
+
+/*
+ * enable/disable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
+
+ /* some versions of atombios ignore the enable HDMI flag,
+ * so enabling/disabling HDMI was moved here for TMDS1+2 */
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* This part is doubtful in my opinion */
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
+ break;
+
+ default:
+ DRM_ERROR("unknown HDMI output type\n");
+ break;
+ }
+}
+
+/*
+ * determine at which register offset the HDMI encoder sits
+ */
+void r600_hdmi_init(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ switch (r600_audio_tmds_index(encoder)) {
+ case 0:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ break;
+ case 1:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ break;
+ default:
+ radeon_encoder->hdmi_offset = 0;
+ break;
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ radeon_encoder->hdmi_offset = R600_HDMI_DIG;
+ break;
+
+ default:
+ radeon_encoder->hdmi_offset = 0;
+ break;
+ }
+
+ DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
+ radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+
+ /* TODO: make this configurable */
+ radeon_encoder->hdmi_audio_workaround = 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index e2d1f5f33f7..d0e28ffdeda 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,5 +110,79 @@
#define R600_BIOS_6_SCRATCH 0x173c
#define R600_BIOS_7_SCRATCH 0x1740
+/* Audio: these regs were reverse engineered,
+ * so the chances are high that the naming is wrong.
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL 0x0514
+#define R600_AUDIO_PLL1_DIV 0x0518
+#define R600_AUDIO_PLL2_MUL 0x0524
+#define R600_AUDIO_PLL2_DIV 0x0528
+#define R600_AUDIO_CLK_SRCSEL 0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE 0x7300
+#define R600_AUDIO_TIMING 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID 0x7380
+#define R600_AUDIO_REVISION_ID 0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
+#define R600_AUDIO_NID1_NODE_COUNT 0x738c
+#define R600_AUDIO_NID1_TYPE 0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
+#define R600_AUDIO_SUPPORTED_CODEC 0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS 0x73a0
+#define R600_AUDIO_NID3_CAPS 0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN 0x73ac
+#define R600_AUDIO_CONN_LIST 0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
+#define R600_AUDIO_PLAYING 0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
+#define R600_AUDIO_PIN_SENSE 0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
+#define R600_AUDIO_STATUS_BITS 0x73d8
+
+/* HDMI base register addresses */
+#define R600_HDMI_TMDS1 0x7400
+#define R600_HDMI_TMDS2 0x7700
+#define R600_HDMI_DIG 0x7800
+
+/* HDMI registers */
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+#define R600_HDMI_CNTL 0x08
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab..30480881aed 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
+#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280E0_BASE_256B 0x00000000
+#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
+#define R_0280C0_CB_COLOR0_TILE 0x0280C0
+#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280C0_BASE_256B 0x00000000
+#define R_0280C4_CB_COLOR1_TILE 0x0280C4
+#define R_0280C8_CB_COLOR2_TILE 0x0280C8
+#define R_0280CC_CB_COLOR3_TILE 0x0280CC
+#define R_0280D0_CB_COLOR4_TILE 0x0280D0
+#define R_0280D4_CB_COLOR5_TILE 0x0280D4
+#define R_0280D8_CB_COLOR6_TILE 0x0280D8
+#define R_0280DC_CB_COLOR7_TILE 0x0280DC
+
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c938bb54123..c0356bb193e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,12 +89,14 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
+extern int radeon_audio;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
 * symbols.
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
@@ -161,6 +163,7 @@ struct radeon_fence_driver {
struct list_head created;
struct list_head emited;
struct list_head signaled;
+ bool initialized;
};
struct radeon_fence {
@@ -201,8 +204,9 @@ struct radeon_surface_reg {
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
};
struct radeon_bo {
@@ -316,10 +320,12 @@ struct radeon_mc {
u64 real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
+ bool igp_sideport_enabled;
};
int radeon_mc_setup(struct radeon_device *rdev);
-
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
/*
* GPU scratch registers structures, functions & helpers
@@ -358,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
*/
struct radeon_ib {
struct list_head list;
- unsigned long idx;
+ unsigned idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
- uint32_t *ptr;
+ uint32_t *ptr;
uint32_t length_dw;
+ bool free;
};
/*
@@ -372,10 +379,9 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
- struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
- DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+ unsigned head_id;
};
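
[Editor's note, not part of the patch: the new `free` flag and `head_id` cursor replace the allocation bitmap. A hedged sketch of how they could drive round-robin IB reuse follows; `ib_pool_get()` is a hypothetical helper name, not code from this series.]

/* Illustration only: round-robin scan starting at head_id.  The
 * power-of-2 pool size (see RADEON_IB_POOL_SIZE above) makes the
 * wrap-around a cheap mask instead of a modulo. */
static struct radeon_ib *ib_pool_get(struct radeon_ib_pool *pool)
{
	unsigned i, idx = pool->head_id;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		struct radeon_ib *ib = &pool->ibs[idx];

		if (ib->free) {
			ib->free = false;
			pool->head_id = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
			return ib;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	return NULL;	/* pool exhausted; a caller would wait on a fence */
}
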
struct radeon_cp {
@@ -405,13 +411,13 @@ struct r600_ih {
unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
- uint32_t align_mask;
uint32_t ptr_mask;
spinlock_t lock;
bool enabled;
};
struct r600_blit {
+ struct mutex mutex;
struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
@@ -460,6 +466,7 @@ struct radeon_cs_chunk {
};
struct radeon_cs_parser {
+ struct device *dev;
struct radeon_device *rdev;
struct drm_file *filp;
/* chunks */
@@ -651,11 +658,17 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
- void (*hdp_flush)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	/* ioctl hw specific callback. Some hw might want to perform a
+	 * special operation on specific ioctls. For instance, on wait idle
+	 * some hw might want to perform an HDP flush through MMIO, as some
+	 * R6XX/R7XX hw doesn't seem to take an HDP flush into account when
+	 * it is programmed through the ring.
+	 */
+ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};
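
[Editor's note: a minimal sketch of where such a callback might be dispatched, assuming the wait-idle ioctl path has already waited on the BO; the function name below is hypothetical and not part of this patch.]

/* Hypothetical dispatch site: after the BO goes idle, give the ASIC a
 * chance to force an HDP flush through MMIO where the ring-programmed
 * flush is unreliable. */
static void radeon_bo_wait_idle_example(struct radeon_device *rdev,
					struct radeon_bo *bo)
{
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, bo);
}
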
/*
@@ -664,11 +677,14 @@ struct radeon_asic {
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
};
struct r300_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
};
struct r600_asic {
@@ -814,6 +830,14 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
struct work_struct hotplug_work;
+
+ /* audio stuff */
+ struct timer_list audio_timer;
+ int audio_channels;
+ int audio_rate;
+ int audio_bits_per_sample;
+ uint8_t audio_status_bits;
+ uint8_t audio_category_code;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -832,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
- if (reg < 0x10000)
+ if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -842,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- if (reg < 0x10000)
+ if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -996,13 +1020,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
-#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */
+/* AGP */
+extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1016,6 +1041,7 @@ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1125,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
+extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1145,6 +1172,22 @@ extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
+extern void r600_irq_suspend(struct radeon_device *rdev);
+/* r600 audio */
+extern int r600_audio_init(struct radeon_device *rdev);
+extern int r600_audio_tmds_index(struct drm_encoder *encoder);
+extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern void r600_audio_fini(struct radeon_device *rdev);
+extern void r600_hdmi_init(struct drm_encoder *encoder);
+extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
+extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+ int channels,
+ int rate,
+ int bps,
+ uint8_t status_bits,
+ uint8_t category_code);
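
[Editor's note: the prototypes above suggest a rough call order during a mode set. A hedged sketch under that assumption — the surrounding function is hypothetical, only the two callees come from this patch.]

/* Hypothetical mode-set path wiring the new audio/HDMI hooks together. */
static void example_hdmi_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	r600_audio_set_clock(encoder, mode->clock);	/* audio engine clock */
	r600_hdmi_setmode(encoder, mode);		/* infoframes, CTS/N */
}
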
#include "radeon_object.h"
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 54bf49a6d67..c0681a5556d 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_info(rdev->ddev, &info);
if (ret) {
+ drm_agp_release(rdev->ddev);
DRM_ERROR("Unable to get AGP info: %d\n", ret);
return ret;
}
+
+ if (rdev->ddev->agp->agp_info.aper_size < 32) {
+ drm_agp_release(rdev->ddev);
+ dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
+ "need at least 32M, disabling AGP\n",
+ rdev->ddev->agp->agp_info.aper_size);
+ return -EINVAL;
+ }
+
mode.mode = info.mode;
agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_enable(rdev->ddev, mode);
if (ret) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+ drm_agp_release(rdev->ddev);
return ret;
}
@@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
- drm_agp_release(rdev->ddev);
- }
+ if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+ drm_agp_release(rdev->ddev);
}
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 636116bedcb..05ee1aeac3f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -33,6 +33,7 @@
*/
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
@@ -76,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
-void r100_hdp_flush(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -106,18 +106,18 @@ static struct radeon_asic r100_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -166,18 +166,18 @@ static struct radeon_asic r300_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -217,11 +217,11 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -259,18 +259,18 @@ static struct radeon_asic rs400_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -323,11 +323,11 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -371,11 +371,11 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -423,11 +423,11 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -466,11 +466,11 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -507,12 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
-void r600_hdp_flush(struct radeon_device *rdev);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -543,11 +543,11 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
/*
@@ -588,11 +588,11 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 12a0c760e7f..4d8831548a5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
i2c.i2c_id = gpio->sucI2cId.ucAccess;
i2c.valid = true;
+ break;
}
}
@@ -205,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* Asrock RS600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x1849) &&
+ (dev->pdev->subsystem_device == 0x7941)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&
@@ -286,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+	/* XFX Pine Group device rv730 reports no VGA DDC lines
+	 * even though they are wired up to gpio record 0x93
+	 */
+ if ((dev->pdev->device == 0x9498) &&
+ (dev->pdev->subsystem_vendor == 0x1682) &&
+ (dev->pdev->subsystem_device == 0x2452)) {
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
return true;
}
@@ -345,7 +364,9 @@ const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
- DRM_MODE_CONNECTOR_DisplayPort
+ DRM_MODE_CONNECTOR_DisplayPort,
+ DRM_MODE_CONNECTOR_eDP,
+ DRM_MODE_CONNECTOR_Unknown
};
bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -745,8 +766,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
- (1 <<
- i),
+ (1 << i),
dac),
(1 << i));
}
@@ -758,32 +778,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
- if (((bios_connectors[i].
- devices &
- (ATOM_DEVICE_DFP_SUPPORT))
- && (bios_connectors[j].
- devices &
- (ATOM_DEVICE_CRT_SUPPORT)))
- ||
- ((bios_connectors[j].
- devices &
- (ATOM_DEVICE_DFP_SUPPORT))
- && (bios_connectors[i].
- devices &
- (ATOM_DEVICE_CRT_SUPPORT)))) {
- bios_connectors[i].
- devices |=
- bios_connectors[j].
- devices;
- bios_connectors[i].
- connector_type =
- DRM_MODE_CONNECTOR_DVII;
- if (bios_connectors[j].devices &
- (ATOM_DEVICE_DFP_SUPPORT))
+ /* make sure not to combine LVDS */
+ if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[i].line_mux = 53;
+ bios_connectors[i].ddc_bus.valid = false;
+ continue;
+ }
+ if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[j].line_mux = 53;
+ bios_connectors[j].ddc_bus.valid = false;
+ continue;
+ }
+ /* combine analog and digital for DVI-I */
+ if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+ ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+ bios_connectors[i].devices |=
+ bios_connectors[j].devices;
+ bios_connectors[i].connector_type =
+ DRM_MODE_CONNECTOR_DVII;
+ if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
bios_connectors[i].hpd =
bios_connectors[j].hpd;
- bios_connectors[j].
- valid = false;
+ bios_connectors[j].valid = false;
}
}
}
@@ -938,6 +956,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
return false;
}
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+};
+
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (igp_info) {
+ switch (crev) {
+ case 1:
+ if (igp_info->info.ucMemoryType & 0xf0)
+ return true;
+ break;
+ case 2:
+ if (igp_info->info_2.ucMemoryType & 0x0f)
+ return true;
+ break;
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ break;
+ }
+ }
+ return false;
+}
+
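
[Editor's note: a sketch of how this helper and its combios counterpart could feed the new igp_sideport_enabled flag in struct radeon_mc. The call site and the is_atom_bios test are assumptions, not part of this patch.]

/* Illustration only: recording sideport presence during MC setup. */
static void example_detect_sideport(struct radeon_device *rdev)
{
	if (!(rdev->flags & RADEON_IS_IGP))
		return;
	rdev->mc.igp_sideport_enabled = rdev->is_atom_bios ?
		radeon_atombios_sideport_present(rdev) :
		radeon_combios_sideport_present(rdev);
}
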
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds)
{
@@ -1029,6 +1084,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ break;
}
}
}
@@ -1234,6 +1290,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
return true;
}
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+ uint16_t data_offset;
+ uint8_t frev, crev;
+ struct _ATOM_ANALOG_TV_INFO *tv_info;
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+ tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+
+ switch (tv_info->ucTV_BootUpDefaultStandard) {
+ case ATOM_TV_NTSC:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Default TV standard: NTSC\n");
+ break;
+ case ATOM_TV_NTSCJ:
+ tv_std = TV_STD_NTSC_J;
+ DRM_INFO("Default TV standard: NTSC-J\n");
+ break;
+ case ATOM_TV_PAL:
+ tv_std = TV_STD_PAL;
+ DRM_INFO("Default TV standard: PAL\n");
+ break;
+ case ATOM_TV_PALM:
+ tv_std = TV_STD_PAL_M;
+ DRM_INFO("Default TV standard: PAL-M\n");
+ break;
+ case ATOM_TV_PALN:
+ tv_std = TV_STD_PAL_N;
+ DRM_INFO("Default TV standard: PAL-N\n");
+ break;
+ case ATOM_TV_PALCN:
+ tv_std = TV_STD_PAL_CN;
+ DRM_INFO("Default TV standard: PAL-CN\n");
+ break;
+ case ATOM_TV_PAL60:
+ tv_std = TV_STD_PAL_60;
+ DRM_INFO("Default TV standard: PAL-60\n");
+ break;
+ case ATOM_TV_SECAM:
+ tv_std = TV_STD_SECAM;
+ DRM_INFO("Default TV standard: SECAM\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
+ return tv_std;
+}
+
struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
{
@@ -1269,6 +1380,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
}
return tv_dac;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc5..7932dc4d6b9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
- if (r) {
- goto out_cleanup;
+
+	/* r100 doesn't have a DMA engine, so skip the test */
+ if (rdev->asic->copy_dma) {
+
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE, fence);
+
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
}
- r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
- if (r) {
- goto out_cleanup;
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+ " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+ n, size >> 10,
+ sdomain, ddomain, time,
+ i, i * 1000, (i * 1000) / 1024);
}
- r = radeon_fence_wait(fence, false);
- if (r) {
- goto out_cleanup;
- }
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- time = end_jiffies - start_jiffies;
- time = jiffies_to_msecs(time);
- if (time > 0) {
- i = ((n * size) >> 10) / time;
- printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
- " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
- sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
}
+
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index b062109efbe..73c4405bf42 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,13 +56,13 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
else if (post_div == 3)
sclk >>= 2;
else if (post_div == 4)
- sclk >>= 4;
+ sclk >>= 3;
return sclk;
}
/* 10 khz */
-static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
{
struct radeon_pll *mpll = &rdev->clock.mpll;
uint32_t fb_div, ref_div, post_div, mclk;
@@ -86,7 +86,7 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
else if (post_div == 3)
mclk >>= 2;
else if (post_div == 4)
- mclk >>= 4;
+ mclk >>= 3;
return mclk;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c5021a3445d..e7b19440102 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
return false;
}
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 igp_info;
+
+ igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+ if (igp_info) {
+ if (RBIOS16(igp_info + 0x4))
+ return true;
+ }
+ return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+ 0x00000808, /* r100 */
+ 0x00000808, /* rv100 */
+ 0x00000808, /* rs100 */
+ 0x00000808, /* rv200 */
+ 0x00000808, /* rs200 */
+ 0x00000808, /* r200 */
+ 0x00000808, /* rv250 */
+ 0x00000000, /* rs300 */
+ 0x00000808, /* rv280 */
+ 0x00000808, /* r300 */
+ 0x00000808, /* r350 */
+ 0x00000808, /* rv350 */
+ 0x00000808, /* rv380 */
+ 0x00000808, /* r420 */
+ 0x00000808, /* r423 */
+ 0x00000808, /* rv410 */
+ 0x00000000, /* rs400 */
+ 0x00000000, /* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_primary_dac *p_dac)
+{
+ p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+ return;
+}
+
struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
radeon_encoder
*encoder)
@@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
uint16_t dac_info;
uint8_t rev, bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
+ int found = 0;
- if (rdev->bios == NULL)
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+ GFP_KERNEL);
+
+ if (!p_dac)
return NULL;
+ if (rdev->bios == NULL)
+ goto out;
+
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
- p_dac =
- kzalloc(sizeof(struct radeon_encoder_primary_dac),
- GFP_KERNEL);
-
- if (!p_dac)
- return NULL;
-
rev = RBIOS8(dac_info) & 0x3;
if (rev < 2) {
bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,20 +670,26 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
-
+ found = 1;
}
+out:
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
return p_dac;
}
-static enum radeon_tv_std
-radeon_combios_get_tv_info(struct radeon_encoder *encoder)
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
{
- struct drm_device *dev = encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *dev = rdev->ddev;
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
+ if (rdev->bios == NULL)
+ return tv_std;
+
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
@@ -779,7 +827,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
found = 1;
}
- tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
+ tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
}
if (!found) {
/* then check CRT table */
@@ -923,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
lvds->native_mode.vdisplay);
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
- if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
- lvds->panel_vcc_delay = 2000;
+ lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5eece186e03..65f81942f39 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_needs_link_train(radeon_connector)) {
if (connector->encoder)
dp_link_train(connector->encoder, connector);
@@ -208,6 +210,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
drm_mode_set_name(mode);
DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+ } else if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0) {
+ /* mac laptops without an edid */
+ /* Note that this is not necessarily the exact panel mode,
+ * but an approximation based on the cvt formula. For these
+ * systems we should ideally read the mode info out of the
+ * registers or add a mode table, but this works and is much
+ * simpler.
+ */
+ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
@@ -566,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
- bool dret;
+ bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -603,7 +619,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
ret = connector_status_connected;
}
} else {
- if (radeon_connector->dac_load_detect) {
+ if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
@@ -726,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
- bool dret;
+ bool dret = false;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -762,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
*/
- if (radeon_connector->shared_ddc && connector_status_connected) {
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
@@ -886,10 +904,18 @@ static void radeon_dvi_force(struct drm_connector *connector)
static int radeon_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
/* XXX check mode bandwidth */
+ /* clocks over 135 MHz have heat issues with DVI on RV100 */
+ if (radeon_connector->use_digital &&
+ (rdev->family == CHIP_RV100) &&
+ (mode->clock > 135000))
+ return MODE_CLOCK_HIGH;
+
if (radeon_connector->use_digital && (mode->clock > 165000)) {
if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
@@ -955,7 +981,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
}
sink_type = radeon_dp_getsinktype(radeon_connector);
- if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_getdpcd(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
@@ -980,7 +1007,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(radeon_connector, mode);
else
return MODE_OK;
@@ -1032,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
}
if (radeon_connector->ddc_bus && i2c_bus->valid) {
- if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
- sizeof(struct radeon_i2c_bus_rec)) == 0) {
+ if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
@@ -1133,6 +1160,7 @@ radeon_add_atom_connector(struct drm_device *dev,
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
@@ -1145,10 +1173,16 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
if (i2c_bus->valid) {
/* add DP i2c bus */
- radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+ else
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
+ else
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1171,7 +1205,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
- 1);
+ radeon_atombios_get_tv_info(rdev));
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -1312,10 +1346,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->dac_load_detect = false;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
- 1);
+ radeon_connector->dac_load_detect);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
- 1);
+ radeon_combios_get_tv_info(rdev));
}
break;
case DRM_MODE_CONNECTOR_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2ad2c..06123ba31d3 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
+ kfree(master_priv);
return ret;
}
master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d9..e9d085021c1 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
&p->validated);
}
}
- return radeon_bo_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (error) {
- radeon_bo_list_unvalidate(&parser->validated,
- parser->ib->fence);
- } else {
- radeon_bo_list_unreserve(&parser->validated);
+ if (!error && parser->ib) {
+ radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
+ radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
mutex_lock(&parser->rdev->ddev->struct_mutex);
@@ -231,6 +229,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.rdev = rdev;
+ parser.dev = rdev->dev;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 02bcdb1240c..768b1509fa0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -391,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev)
/* FIXME: not supported yet */
return -EINVAL;
}
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->asic->get_memory_clock = NULL;
+ rdev->asic->set_memory_clock = NULL;
+ }
+
return 0;
}
@@ -481,6 +487,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
@@ -537,11 +544,75 @@ void radeon_agp_disable(struct radeon_device *rdev)
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+void radeon_check_arguments(struct radeon_device *rdev)
+{
+ /* vramlimit must be a power of two */
+ switch (radeon_vram_limit) {
+ case 0:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+ radeon_vram_limit);
+ radeon_vram_limit = 0;
+ break;
+ }
+ radeon_vram_limit = radeon_vram_limit << 20;
+ /* gtt size must be power of two and greater or equal to 32M */
+ switch (radeon_gart_size) {
+ case 4:
+ case 8:
+ case 16:
+ dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ break;
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ break;
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ /* AGP mode can only be -1, 1, 2, 4, 8 */
+ switch (radeon_agpmode) {
+ case -1:
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
+ "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+ radeon_agpmode = 0;
+ break;
+ }
}
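
[Editor's note: the explicit switches above keep the valid values self-documenting. An equivalent check could lean on is_power_of_2() from <linux/log2.h>; a sketch under that assumption, with the bounds taken from the gart case above.]

#include <linux/log2.h>

/* Equivalent-in-spirit validation sketch for the gart size module
 * parameter: a power of two between 32M and 4096M. */
static bool gart_size_valid_example(int mb)
{
	return mb >= 32 && mb <= 4096 && is_power_of_2(mb);
}
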
-/*
- * Radeon device.
- */
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -580,9 +651,9 @@ int radeon_device_init(struct radeon_device *rdev,
/* Set asic functions */
r = radeon_asic_init(rdev);
- if (r) {
+ if (r)
return r;
- }
+ radeon_check_arguments(rdev);
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
@@ -663,16 +734,18 @@ void radeon_device_fini(struct radeon_device *rdev)
*/
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
- struct radeon_device *rdev = dev->dev_private;
+ struct radeon_device *rdev;
struct drm_crtc *crtc;
int r;
- if (dev == NULL || rdev == NULL) {
+ if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW) {
return 0;
}
+ rdev = dev->dev_private;
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a133b833e45..7e17a362b54 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
"INTERNAL_UNIPHY2",
};
-static const char *connector_names[13] = {
+static const char *connector_names[15] = {
"Unknown",
"VGA",
"DVI-I",
@@ -248,6 +248,8 @@ static const char *connector_names[13] = {
"DisplayPort",
"HDMI-A",
"HDMI-B",
+ "TV",
+ "eDP",
};
static const char *hpd_names[7] = {
@@ -276,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -286,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+ }
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -329,8 +340,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- } else
+ } else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
+ if (ret == false)
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
} else {
if (!ASIC_IS_AVIVO(rdev))
ret = radeon_get_legacy_connector_info_from_table(dev);
@@ -349,9 +363,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
int ret = 0;
- if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (dig->dp_i2c_bus)
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
}
if (!radeon_connector->ddc_bus)
@@ -404,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags)
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
+ uint32_t min_post_div = pll->min_post_div;
+ uint32_t max_post_div = pll->max_post_div;
uint32_t min_fractional_feed_div = 0;
uint32_t max_fractional_feed_div = 0;
uint32_t best_vco = pll->best_vco;
@@ -424,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
- if (flags & RADEON_PLL_USE_REF_DIV)
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
while (min_ref_div < max_ref_div-1) {
@@ -439,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
}
}
- if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ min_post_div = max_post_div = pll->post_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
min_fractional_feed_div = pll->min_frac_feedback_div;
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+ for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
uint32_t ref_div;
- if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
continue;
/* legacy radeons only have a few post_divs */
- if (flags & RADEON_PLL_LEGACY) {
+ if (pll->flags & RADEON_PLL_LEGACY) {
if ((post_div == 5) ||
(post_div == 7) ||
(post_div == 9) ||
@@ -498,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
current_freq = radeon_div(tmp, ref_div * post_div);
- if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+ if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
error = freq - current_freq;
error = error < 0 ? 0xffffffff : error;
} else
@@ -525,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
- } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
- ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
- ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+ } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
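
[Editor's note: with the flags folded into struct radeon_pll, callers now stash them on the pll before the call instead of passing a trailing parameter. A hedged sketch of the new convention — the local variables and flag choice below are hypothetical.]

/* Illustration of the post-change calling convention. */
static void example_set_pll(struct radeon_pll *pll, uint64_t freq)
{
	uint32_t dot_clock, fb_div, frac_fb_div, ref_div, post_div;

	pll->flags = RADEON_PLL_USE_REF_DIV | RADEON_PLL_PREFER_LOW_REF_DIV;
	radeon_compute_pll(pll, freq, &dot_clock, &fb_div, &frac_fb_div,
			   &ref_div, &post_div);
	/* the dividers would then feed the CRTC PLL programming */
}
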
@@ -566,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags)
+ uint32_t *post_div_p)
{
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
fixed20_12 pll_out_max, pll_out_min;
@@ -661,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
radeonfb_remove(dev, fb);
if (radeon_fb->obj) {
- radeon_gem_object_unpin(radeon_fb->obj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(radeon_fb->obj);
mutex_unlock(&dev->struct_mutex);
@@ -709,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-
+ if (obj == NULL) {
+ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ "can't create framebuffer\n", mode_cmd->handle);
+ return NULL;
+ }
return radeon_framebuffer_create(dev, mode_cmd, obj);
}
@@ -739,7 +761,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ TV_STD_SECAM, "secam" },
};
-int radeon_modeset_create_props(struct radeon_device *rdev)
+static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c5c45e626d7..8ba3de7994d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -87,6 +87,7 @@ int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = 1;
+int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -124,6 +125,9 @@ module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
module_param_named(new_pll, radeon_new_pll, int, 0444);
+MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -192,7 +196,7 @@ static struct drm_driver driver_old = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
@@ -280,7 +284,7 @@ static struct drm_driver kms_driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a8..c57ad606504 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
* 1.29- R500 3D cmd buffer support
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 31
+#define DRIVER_MINOR 32
#define DRIVER_PATCHLEVEL 0
enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b4f23ec9320..3c91724457c 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
return ret;
}
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->devices & radeon_connector->devices)
+ if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -233,6 +253,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
if (!ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
}
adjusted_mode->base.id = mode_id;
}
@@ -438,6 +460,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
+ int hdmi_detected = 0;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
struct drm_connector *connector;
@@ -458,6 +481,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (!radeon_connector->con_priv)
return;
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ hdmi_detected = 1;
+
dig_connector = radeon_connector->con_priv;
memset(&args, 0, sizeof(args));
@@ -487,13 +513,13 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & (1 << 0))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
if (dig_connector->linkb)
@@ -512,7 +538,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
@@ -520,18 +546,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & (1 << 0))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & (1 << 5)) {
+ if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
- if (dig->lvds_misc & (1 << 6)) {
+ if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lvds_misc >> 2) & 0x3) == 2)
+ if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
@@ -552,7 +578,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
+ r600_hdmi_enable(encoder, hdmi_detected);
}
int
@@ -590,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = radeon_connector->con_priv;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
break;
- case CONNECTOR_DVI_A:
- case CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
break;
- case CONNECTOR_STV:
- case CONNECTOR_CTV:
- case CONNECTOR_DIN:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/
@@ -668,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = dig->dig_block + 1;
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- num = 2;
- break;
- }
- }
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ num = dig->dig_encoder + 1;
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
@@ -814,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
+ if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
@@ -841,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.acConfig.fCoherentMode = 1;
}
} else {
+
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+ if (dig->dig_encoder)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
if (dig_connector->igp_lane_info & 0x3)
@@ -870,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
}
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- break;
}
if (radeon_encoder->pixel_clock > 165000)
@@ -893,7 +896,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
static void
@@ -1039,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
union crtc_sourc_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
memset(&args, 0, sizeof(args));
@@ -1102,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (ASIC_IS_DCE32(rdev)) {
- if (radeon_crtc->crtc_id)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else {
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return;
- radeon_connector = to_radeon_connector(connector);
- if (!radeon_connector->con_priv)
- return;
- dig_connector = radeon_connector->con_priv;
-
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- }
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ dig = radeon_encoder->enc_priv;
+ if (dig->dig_encoder)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1162,7 +1141,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
static void
@@ -1196,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *test_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ uint32_t dig_enc_in_use = 0;
+ /* on DCE32 an encoder can drive any block, so just use the crtc id */
+ if (ASIC_IS_DCE32(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
+ /* on DCE3 - LVTMA can only be driven by DIGB */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_test_encoder;
+
+ if (encoder == test_encoder)
+ continue;
+
+ if (!radeon_encoder_is_digital(test_encoder))
+ continue;
+
+ radeon_test_encoder = to_radeon_encoder(test_encoder);
+ dig = radeon_test_encoder->enc_priv;
+
+ if (dig->dig_encoder >= 0)
+ dig_enc_in_use |= (1 << dig->dig_encoder);
+ }
+
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+ if (dig_enc_in_use & 0x2)
+ DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+ return 1;
+ }
+ if (!(dig_enc_in_use & 1))
+ return 0;
+ return 1;
+}
+
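[Editor's note] The picker above walks every other digital encoder, collects the DIG blocks already claimed into a bitmask, and takes the lowest free one (with LVTMA pinned to DIGB). The same idiom generalizes beyond two blocks; a minimal C sketch with hypothetical names:

	/* Sketch: pick the lowest free block out of n via an in-use bitmask. */
	static int pick_first_free(u32 in_use, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (!(in_use & (1 << i)))
				return i;	/* first unclaimed block */
		return -1;			/* every block is claimed */
	}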
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -1208,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
- if (radeon_encoder->enc_priv) {
- struct radeon_encoder_atom_dig *dig;
-
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1265,6 +1281,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1371,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (radeon_encoder_is_digital(encoder)) {
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
radeon_encoder->active_device = 0;
}
@@ -1428,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
/* coherent mode by default */
dig->coherent_mode = true;
+ dig->dig_encoder = -1;
return dig;
}
@@ -1510,4 +1535,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
+
+ r600_hdmi_init(encoder);
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06..d71e346e9ab 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- memset_io(fbptr, 0xff, aligned_size);
+ memset_io(fbptr, 0x0, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index cb4cd97ae39..8495d4e32e1 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
bool radeon_fence_signaled(struct radeon_fence *fence)
{
- struct radeon_device *rdev = fence->rdev;
unsigned long irq_flags;
bool signaled = false;
- if (rdev->gpu_lockup) {
+ if (!fence)
return true;
- }
- if (fence == NULL) {
+
+ if (fence->rdev->gpu_lockup)
return true;
- }
+
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
@@ -324,7 +323,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
if (r) {
- DRM_ERROR("Fence failed to get a scratch register.");
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
@@ -335,9 +334,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
+ rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for fence !\n");
+ dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
return 0;
}
@@ -346,11 +346,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
+ if (!rdev->fence_drv.initialized)
+ return;
wake_up_all(&rdev->fence_drv.queue);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- DRM_INFO("radeon: fence finalized\n");
+ rdev->fence_drv.initialized = false;
}
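[Editor's note] The fence driver now records whether it was actually brought up, so the fini path is safe to call from error-unwind code even after a partial init (the TTM teardown later in this diff adopts the same guard). A minimal sketch of the pattern, with hypothetical names:

	/* Sketch of the init/fini guard pattern. */
	struct foo_driver {
		bool initialized;
		/* ... other state ... */
	};

	static int foo_init(struct foo_driver *drv)
	{
		/* ... allocate resources ... */
		drv->initialized = true;
		return 0;
	}

	static void foo_fini(struct foo_driver *drv)
	{
		if (!drv->initialized)
			return;		/* safe even if init never ran */
		/* ... release resources ... */
		drv->initialized = false;
	}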
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 2944486871b..db8e9a355a0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -66,8 +66,9 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
}
r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
- size, initial_domain, alignment);
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
mutex_lock(&rdev->ddev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
@@ -130,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
- radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -308,10 +308,12 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
+ /* call back into hw-specific functions, if any */
+ if (robj->rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- radeon_hdp_flush(robj->rdev);
return r;
}
@@ -350,9 +352,10 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
rbo = gobj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
- return r;
+ goto out;
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
+out:
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index a1bf11de308..48b7cea31e0 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
&init->gart_textures_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
}
typedef struct drm_radeon_clear32 {
@@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
&clr->depth_boxes))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
}
typedef struct drm_radeon_stipple32 {
@@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
&request->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
}
typedef struct drm_radeon_tex_image32 {
@@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
&image->data))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
}
typedef struct drm_radeon_vertex2_32 {
@@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
&request->prim))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
}
typedef struct drm_radeon_cmd_buffer32 {
@@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
&request->boxes))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
}
typedef struct drm_radeon_getparam32 {
@@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
}
typedef struct drm_radeon_mem_alloc32 {
@@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
&request->region_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
}
typedef struct drm_radeon_irq_emit32 {
@@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
&request->irq_seq))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
}
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
@@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
}
#else
#define compat_radeon_cp_setparam NULL
@@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
@@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- lock_kernel(); /* XXX for now */
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
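[Editor's note] All of these wrappers follow the same shape: copy the packed 32-bit structure in, rebuild it in a scratch area of the 64-bit user address space, then hand it to the now inode-less drm_ioctl(). A hedged sketch of one such thunk; drm_radeon_foo and DRM_IOCTL_RADEON_FOO are hypothetical placeholders, the helpers are the era's real compat API:

	typedef struct drm_radeon_foo32 {
		u32 handle;
		u32 value;			/* 32-bit user pointer */
	} drm_radeon_foo32_t;

	typedef struct drm_radeon_foo {		/* hypothetical 64-bit layout */
		u32 handle;
		void __user *value;
	} drm_radeon_foo_t;

	static int compat_radeon_foo(struct file *file, unsigned int cmd,
				     unsigned long arg)
	{
		drm_radeon_foo32_t req32;
		drm_radeon_foo_t __user *request;

		if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
			return -EFAULT;

		/* scratch space on the 64-bit user stack */
		request = compat_alloc_user_space(sizeof(*request));
		if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
		    || __put_user(req32.handle, &request->handle)
		    || __put_user((void __user *)(unsigned long)req32.value,
				  &request->value))
			return -EFAULT;

		return drm_ioctl(file, DRM_IOCTL_RADEON_FOO,
				 (unsigned long)request);
	}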
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4a7cc..2f349a30019 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_emit_t *emit = data;
int result;
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
- return -EINVAL;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return -EINVAL;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
result = radeon_emit_irq(dev);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9223296fe37..3cfd60fd008 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
rdev->irq.crtc_vblank_int[i] = false;
+ rdev->irq.hpd[i] = false;
}
radeon_irq_set(rdev);
}
@@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
DRM_INFO("radeon: using MSI.\n");
}
}
- drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
+ r = drm_irq_install(rdev->ddev);
+ if (r) {
+ rdev->irq.installed = false;
+ return r;
+ }
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
+ drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
- rdev->irq.installed = false;
drm_irq_uninstall(rdev->ddev);
+ rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b82ede98e15..b6d8081e124 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -43,8 +43,7 @@ static void radeon_overscan_setup(struct drm_crtc *crtc,
}
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -340,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-/* properly set crtc bpp when using atombios */
-void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int format;
- uint32_t crtc_gen_cntl;
- uint32_t disp_merge_cntl;
- uint32_t crtc_pitch;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- format = 2;
- break;
- case 15: /* 555 */
- format = 3;
- break;
- case 16: /* 565 */
- format = 4;
- break;
- case 24: /* RGB */
- format = 5;
- break;
- case 32: /* xRGB */
- format = 6;
- break;
- default:
- return;
- }
-
- crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
- ((crtc->fb->bits_per_pixel * 8) - 1)) /
- (crtc->fb->bits_per_pixel * 8));
- crtc_pitch |= crtc_pitch << 16;
-
- WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
-
- switch (radeon_crtc->crtc_id) {
- case 0:
- disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
- disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
- WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
-
- crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
- crtc_gen_cntl |= (format << 8);
- crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
- WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
- break;
- case 1:
- disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
- disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
- WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
-
- crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
- crtc_gen_cntl |= (format << 8);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
- WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
- WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
- break;
- }
-}
-
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -756,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
uint32_t post_divider = 0;
uint32_t freq = 0;
uint8_t pll_gain;
- int pll_flags = RADEON_PLL_LEGACY;
bool use_bios_divs = false;
/* PLL registers */
uint32_t pll_ref_div = 0;
@@ -790,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
else
pll = &rdev->clock.p1pll;
+ pll->flags = RADEON_PLL_LEGACY;
+
if (mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
@@ -805,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
if (!rdev->is_atom_bios) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -820,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
}
- pll_flags |= RADEON_PLL_USE_REF_DIV;
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
}
}
}
@@ -830,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
&freq, &feedback_div, &frac_fb_div,
- &reference_div, &post_divider,
- pll_flags);
+ &reference_div, &post_divider);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
if (post_div->divider == post_divider)
@@ -1059,7 +995,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
radeon_set_pll(crtc, adjusted_mode);
radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
- radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
+ radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
} else {
if (radeon_crtc->rmx_type != RMX_OFF) {
/* FIXME: only the first crtc has rmx; what should we
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index df00515e81f..38e45e231ef 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
+ bool is_mac = false;
DRM_DEBUG("\n");
if (radeon_encoder->enc_priv) {
@@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
}
}
+ /* Macs (and possibly some x86 OEM systems?) wire up LVDS strangely.
+ * Taken from radeonfb.
+ */
+ if ((rdev->mode_info.connector_table == CT_IBOOK) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
+ is_mac = true;
+
switch (mode) {
case DRM_MODE_DPMS_ON:
disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
@@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
+ if (is_mac)
+ lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
@@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
- lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ if (is_mac) {
+ lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
+ } else {
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ }
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
@@ -207,6 +226,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
*adjusted_mode = *native_mode;
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
adjusted_mode->base.id = mode_id;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 3a12bb0c056..417684daef4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
unsigned pix_to_tv;
};
-static const uint16_t hor_timing_NTSC[] = {
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x003f,
0x0263,
@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
0
};
-static const uint16_t vert_timing_NTSC[] = {
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200d,
0x1006,
@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
0
};
-static const uint16_t hor_timing_PAL[] = {
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x0058,
0x027c,
@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
0
};
-static const uint16_t vert_timing_PAL[] = {
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200c,
0x1005,
@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
}
flicker_removal = (tmp + 500) / 1000;
- if (flicker_removal < 3)
- flicker_removal = 3;
- for (i = 0; i < 6; ++i) {
+ if (flicker_removal < 2)
+ flicker_removal = 2;
+ for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
if (flicker_removal == SLOPE_limit[i])
break;
}
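[Editor's note] Replacing the hard-coded bound of 6 with ARRAY_SIZE() ties the loop to the table definition, so growing or shrinking SLOPE_limit can no longer desynchronize the search. For reference, the macro is roughly:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

The in-tree version also adds a compile-time check that the argument really is an array rather than a pointer, which would silently break the sizeof arithmetic.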
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 44d4b652ea1..e81b2aeb6a8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,32 +46,6 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
-enum radeon_connector_type {
- CONNECTOR_NONE,
- CONNECTOR_VGA,
- CONNECTOR_DVI_I,
- CONNECTOR_DVI_D,
- CONNECTOR_DVI_A,
- CONNECTOR_STV,
- CONNECTOR_CTV,
- CONNECTOR_LVDS,
- CONNECTOR_DIGITAL,
- CONNECTOR_SCART,
- CONNECTOR_HDMI_TYPE_A,
- CONNECTOR_HDMI_TYPE_B,
- CONNECTOR_0XC,
- CONNECTOR_0XD,
- CONNECTOR_DIN,
- CONNECTOR_DISPLAY_PORT,
- CONNECTOR_UNSUPPORTED
-};
-
-enum radeon_dvi_type {
- DVI_AUTO,
- DVI_DIGITAL,
- DVI_ANALOG
-};
-
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
@@ -88,6 +62,7 @@ enum radeon_tv_std {
TV_STD_SCART_PAL,
TV_STD_SECAM,
TV_STD_PAL_CN,
+ TV_STD_PAL_N,
};
/* radeon gpio-based i2c
@@ -150,16 +125,24 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV (1 << 12)
struct radeon_pll {
- uint16_t reference_freq;
- uint16_t reference_div;
+ /* reference frequency */
+ uint32_t reference_freq;
+
+ /* fixed dividers */
+ uint32_t reference_div;
+ uint32_t post_div;
+
+ /* pll in/out limits */
uint32_t pll_in_min;
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
- uint16_t xclk;
+ uint32_t best_vco;
+ /* divider limits */
uint32_t min_ref_div;
uint32_t max_ref_div;
uint32_t min_post_div;
@@ -168,7 +151,12 @@ struct radeon_pll {
uint32_t max_feedback_div;
uint32_t min_frac_feedback_div;
uint32_t max_frac_feedback_div;
- uint32_t best_vco;
+
+ /* flags for the current clock */
+ uint32_t flags;
+
+ /* pll id */
+ uint32_t id;
};
struct radeon_i2c_chan {
@@ -311,7 +299,7 @@ struct radeon_atom_ss {
struct radeon_encoder_atom_dig {
/* atom dig */
bool coherent_mode;
- int dig_block;
+ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
@@ -334,6 +322,9 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int hdmi_offset;
+ int hdmi_audio_workaround;
+ int hdmi_buffer_status;
};
struct radeon_connector_atom_dig {
@@ -392,6 +383,11 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
@@ -434,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags);
+ uint32_t *post_div_p);
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
@@ -443,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags);
+ uint32_t *post_div_p);
extern void radeon_setup_encoder_clones(struct drm_device *dev);
@@ -470,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
-extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 544e18ffaf2..f1da370928e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,6 +56,13 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &radeon_ttm_bo_destroy)
+ return true;
+ return false;
+}
+
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
u32 c = 0;
@@ -71,6 +78,8 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
if (domain & RADEON_GEM_DOMAIN_CPU)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
}
@@ -211,9 +220,11 @@ int radeon_bo_unpin(struct radeon_bo *bo)
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
- if (rdev->flags & RADEON_IS_IGP) {
- /* Useless to evict on IGP chips */
- return 0;
+ /* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
+ if (0 && (rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->mc.igp_sideport_enabled == false)
+ /* Useless to evict on IGP chips */
+ return 0;
}
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
@@ -295,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
}
}
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
- struct radeon_fence *old_fence = NULL;
int r;
r = radeon_bo_list_reserve(head);
@@ -323,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
- if (fence) {
- old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
- bo->tbo.sync_obj = radeon_fence_ref(fence);
- bo->tbo.sync_obj_arg = NULL;
- }
- if (old_fence) {
- radeon_fence_unref(&old_fence);
- }
}
return 0;
}
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
{
struct radeon_bo_list *lobj;
- struct radeon_fence *old_fence;
-
- if (fence)
- list_for_each_entry(lobj, head, list) {
- old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
- if (old_fence == fence) {
- lobj->bo->tbo.sync_obj = NULL;
- radeon_fence_unref(&old_fence);
- }
+ struct radeon_bo *bo;
+ struct radeon_fence *old_fence = NULL;
+
+ list_for_each_entry(lobj, head, list) {
+ bo = lobj->bo;
+ spin_lock(&bo->tbo.lock);
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
+ spin_unlock(&bo->tbo.lock);
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
}
- radeon_bo_list_unreserve(head);
+ }
}
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
@@ -481,14 +486,20 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f6b69c2c0d0..7ab43de1e24 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -59,19 +59,17 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
- * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
int r;
-retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
- if (r == -ERESTART)
- goto retry;
- dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -125,12 +123,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
{
int r;
-retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
- if (r == -ERESTART)
- goto retry;
- dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
return r;
}
spin_lock(&bo->tbo.lock);
@@ -140,8 +136,6 @@ retry:
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.lock);
ttm_bo_unreserve(&bo->tbo);
- if (unlikely(r == -ERESTART))
- goto retry;
return r;
}
@@ -162,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 34b08d307c8..8bce64cdc32 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+ if (rdev->asic->get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4..6579eb4c1f2 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
- unsigned long i;
- int r = 0;
+ int r = 0, i, c;
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
if (r) {
- DRM_ERROR("failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
- i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
- if (i < RADEON_IB_POOL_SIZE) {
- set_bit(i, rdev->ib_pool.alloc_bm);
- rdev->ib_pool.ibs[i].length_dw = 0;
- *ib = &rdev->ib_pool.ibs[i];
- mutex_unlock(&rdev->ib_pool.mutex);
- goto out;
+ for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+ i &= (RADEON_IB_POOL_SIZE - 1);
+ if (rdev->ib_pool.ibs[i].free) {
+ nib = &rdev->ib_pool.ibs[i];
+ break;
+ }
}
- if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
- /* we go do nothings here */
+ if (nib == NULL) {
+ /* This should never happen: it means we have allocated all
+ * IBs and none has been scheduled yet; return -EBUSY so
+ * userspace can retry the ioctl and hopefully have better
+ * luck.
+ */
+ dev_err(rdev->dev, "no free indirect buffer!\n");
mutex_unlock(&rdev->ib_pool.mutex);
- DRM_ERROR("all IB allocated none scheduled.\n");
- r = -EINVAL;
- goto out;
+ radeon_fence_unref(&fence);
+ return -EBUSY;
}
- /* get the first ib on the scheduled list */
- nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
- struct radeon_ib, list);
- if (nib->fence == NULL) {
- /* we go do nothings here */
+ rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ nib->free = false;
+ if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
- DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
- r = -EINVAL;
- goto out;
- }
- mutex_unlock(&rdev->ib_pool.mutex);
-
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
- (unsigned long)nib->gpu_addr, nib->length_dw);
- DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
- goto out;
+ r = radeon_fence_wait(nib->fence, false);
+ if (r) {
+ dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+ nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+ mutex_lock(&rdev->ib_pool.mutex);
+ nib->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_fence_unref(&fence);
+ return r;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
}
radeon_fence_unref(&nib->fence);
-
+ nib->fence = fence;
nib->length_dw = 0;
-
- /* scheduled list is accessed here */
- mutex_lock(&rdev->ib_pool.mutex);
- list_del(&nib->list);
- INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex);
-
*ib = nib;
-out:
- if (r) {
- radeon_fence_unref(&fence);
- } else {
- (*ib)->fence = fence;
- }
- return r;
+ return 0;
}
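[Editor's note] The bitmap allocator is replaced by a circular scan that starts at head_id and wraps with a power-of-two mask, so IBs are handed out in roughly FIFO order and the slot reached first is the one whose fence is most likely to have signaled already. A minimal sketch of the scan, assuming the pool size is a power of two (slot type and the size of 16 are illustrative):

	#define RADEON_IB_POOL_SIZE 16		/* must be a power of two */

	struct ib_slot { bool free; };

	static int pick_free_ib(struct ib_slot *pool, unsigned head_id)
	{
		unsigned i, c;

		for (i = head_id, c = 0; c < RADEON_IB_POOL_SIZE; c++, i++) {
			i &= (RADEON_IB_POOL_SIZE - 1);	/* wrap around the pool */
			if (pool[i].free)
				return i;	/* first free slot at/after head */
		}
		return -1;			/* exhausted: caller returns -EBUSY */
	}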
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- mutex_lock(&rdev->ib_pool.mutex);
- if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
- /* IB is scheduled & not signaled don't do anythings */
- mutex_unlock(&rdev->ib_pool.mutex);
- return;
- }
- list_del(&tmp->list);
- INIT_LIST_HEAD(&tmp->list);
- if (tmp->fence)
+ if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
-
- tmp->length_dw = 0;
- clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+ mutex_lock(&rdev->ib_pool.mutex);
+ tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: nothing in the IB; we should report this. */
- DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+ DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+ /* once scheduled, an IB is considered free and is protected by its fence */
+ ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
/* Allocate 1M object buffer */
- INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+ rdev->ib_pool.ibs[i].free = true;
}
- bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
- bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
if (ib == NULL) {
return 0;
}
- seq_printf(m, "IB %04lu\n", ib->idx);
+ seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 391c973ec4d..9f5e2f929da 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
- rdev->cp.ring_size) / size;
+ n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+ rdev->cp.ring_size)) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5a19d529d1c..58b5adf974c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,10 +200,25 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo;
+ static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+ placement->fpfn = 0;
+ placement->lpfn = 0;
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+ placement->num_busy_placement = 1;
+ return;
+ }
+ rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ if (rbo->rdev->cp.ready == false)
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ else
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
break;
case TTM_PL_TT:
default:
@@ -482,6 +497,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
+ rdev->mman.initialized = true;
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
@@ -529,6 +545,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
{
int r;
+ if (!rdev->mman.initialized)
+ return;
if (rdev->stollen_vga_memory) {
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r == 0) {
@@ -542,6 +560,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
radeon_ttm_global_fini(rdev);
+ rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a1..c29ac434ac9 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
0x2648 RE_POINTSIZE
0x26c0 RE_TOP_LEFT
0x26c4 RE_MISC
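[Editor's note] These reg_srcs files are plain tables: the first line names the chip and the highest checked offset, and each following line whitelists one dword-aligned register (offset plus name) for the command-stream checker; at build time they are turned into bitmaps (via the in-tree mkregtable helper). A hedged sketch of how such a bitmap lookup might look, with hypothetical names:

	/* Sketch: test a register offset against a generated safe bitmap. */
	static bool reg_is_safe(const u32 *safe_bm, unsigned reg)
	{
		unsigned i = reg >> 2;		/* registers are dword-aligned */

		return (safe_bm[i / 32] >> (i % 32)) & 1;
	}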
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 00000000000..989f7a02083
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,795 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A4 SC_HYPERZ_EN
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F1C ZB_BW_CNTL
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F30 ZB_ZMASK_OFFSET
+0x4F34 ZB_ZMASK_PITCH
+0x4F38 ZB_ZMASK_WRINDEX
+0x4F3C ZB_ZMASK_DWORD
+0x4F40 ZB_ZMASK_RDINDEX
+0x4F44 ZB_HIZ_OFFSET
+0x4F48 ZB_HIZ_WRINDEX
+0x4F4C ZB_HIZ_DWORD
+0x4F50 ZB_HIZ_RDINDEX
+0x4F54 ZB_HIZ_PITCH
+0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 8e3c0b807ad..6801b865d1c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -153,7 +153,7 @@ rs600 0x6d40
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
-0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
@@ -291,6 +291,8 @@ rs600 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
0x46C0 US_ALU_RGB_ADDR_0
0x46C4 US_ALU_RGB_ADDR_1
0x46C8 US_ALU_RGB_ADDR_2
@@ -547,6 +549,70 @@ rs600 0x6d40
0x4AB4 US_ALU_ALPHA_INST_61
0x4AB8 US_ALU_ALPHA_INST_62
0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 0102a0d5735..38abf63bf2c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -161,7 +161,12 @@ rv515 0x6d40
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
@@ -171,6 +176,7 @@ rv515 0x6d40
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c1fcdddb6be..287fcebfb4e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(0x0150);
+ if (tmp & (1 << 2)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
- if (r300_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
+ if (rs400_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+ "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
}
}
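The rs400_mc_wait_for_idle() helper added above is a plain poll-with-timeout loop: read MC_STATUS (offset 0x0150) once per microsecond until the idle bit (bit 2) is set, giving up after rdev->usec_timeout iterations. A minimal standalone sketch of the same idiom; read_reg() and delay_us() are hypothetical stand-ins for RREG32() and DRM_UDELAY():

#include <stdint.h>

#define MC_STATUS  0x0150        /* MC_STATUS offset, per the hunk above */
#define MC_IDLE    (1u << 2)     /* idle bit tested above */

extern uint32_t read_reg(uint32_t offset);  /* hypothetical MMIO read */
extern void delay_us(unsigned int usecs);   /* hypothetical 1us busy-wait */

static int wait_for_mc_idle(unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_reg(MC_STATUS) & MC_IDLE)
			return 0;    /* memory controller is idle */
		delay_us(1);
	}
	return -1;                   /* timed out; caller warns and carries on */
}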
@@ -356,6 +372,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -369,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
r100_mc_stop(rdev, &save);
/* Wait for mc idle */
- if (r300_mc_wait_for_idle(rdev))
- dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ if (rs400_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
WREG32(R_000148_MC_FB_LOCATION,
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -395,6 +412,7 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -446,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -497,6 +514,8 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
rs400_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -523,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea426057..c3818562a13 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xffffffffUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
break;
}
}
- rs600_irq_set(rdev);
+ if (rdev->irq.installed)
+ rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
@@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev)
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
@@ -396,7 +403,7 @@ int rs600_irq_process(struct radeon_device *rdev)
}
while (status || r500_disp_int) {
/* SW interrupt */
- if (G_000040_SW_INT_EN(status))
+ if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
@@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -602,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -681,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1e22f52d603..06e2771aee5 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -674,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -754,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 59632a506b4..0e1e6b8632b 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -536,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -614,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index fbb0357f1ec..03021674d09 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
- rdev->config.rv770.max_backends,
- (0xff << rdev->config.rv770.max_backends) & 0xff);
+ if (rdev->family == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+ rdev->config.rv770.max_backends,
+ (0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
@@ -779,7 +782,6 @@ int rv770_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -822,9 +824,6 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -891,18 +890,25 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
rv770_gpu_init(rdev);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ r = r600_blit_init(rdev);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+ /* pin copy shader into vram */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("failed to pin blit object %d\n", r);
+ return r;
+ }
}
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -964,13 +970,16 @@ int rv770_suspend(struct radeon_device *rdev)
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_irq_suspend(rdev);
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
}
return 0;
}
@@ -1029,6 +1038,11 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
r = rv770_mc_init(rdev);
if (r)
return r;
@@ -1051,31 +1065,28 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
-
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
- rv770_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
return 0;
@@ -1083,19 +1094,16 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- rv770_suspend(rdev);
-
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- if (rdev->flags & RADEON_IS_AGP)
- radeon_agp_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa92a7..021de44c15a 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e725cc0b115..4fd1f067d38 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -80,7 +80,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e356b..ec5a43e6572 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fbb2eea5e8..c7320ce4567 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
return -EINVAL;
}
-static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
- struct ttm_mem_type_manager *man)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
- printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
+ printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
- spin_lock(&glob->lru_lock);
- drm_mm_debug_table(&man->manager, TTM_PFX);
- spin_unlock(&glob->lru_lock);
+ if (mem_type != TTM_PL_SYSTEM) {
+ spin_lock(&bdev->glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&bdev->glob->lru_lock);
+ }
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
- struct ttm_mem_type_manager *man;
int i, ret, mem_type;
- printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+ printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
@@ -106,10 +106,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
&mem_type);
if (ret)
return;
- man = &bdev->man[mem_type];
printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type);
- ttm_mem_type_manager_debug(glob, man);
+ ttm_mem_type_debug(bo->bdev, mem_type);
}
}
@@ -427,7 +426,8 @@ moved:
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
spin_unlock(&bo->lock);
- }
+ } else
+ bo->offset = 0;
return 0;
@@ -465,6 +465,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+
ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
BUG_ON(ret);
if (bo->ttm)
@@ -472,20 +474,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
if (!list_empty(&bo->ddestroy)) {
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ++put_count;
}
if (bo->mem.mm_node) {
bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
- put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
atomic_set(&bo->reserved, 0);
while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
return 0;
}
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry, *nentry;
- struct list_head *list, *next;
- int ret;
+ struct ttm_buffer_object *entry = NULL;
+ int ret = 0;
spin_lock(&glob->lru_lock);
- list_for_each_safe(list, next, &bdev->ddestroy) {
- entry = list_entry(list, struct ttm_buffer_object, ddestroy);
- nentry = NULL;
+ if (list_empty(&bdev->ddestroy))
+ goto out_unlock;
- /*
- * Protect the next list entry from destruction while we
- * unlock the lru_lock.
- */
+ entry = list_first_entry(&bdev->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ kref_get(&entry->list_kref);
+
+ for (;;) {
+ struct ttm_buffer_object *nentry = NULL;
- if (next != &bdev->ddestroy) {
- nentry = list_entry(next, struct ttm_buffer_object,
- ddestroy);
+ if (entry->ddestroy.next != &bdev->ddestroy) {
+ nentry = list_first_entry(&entry->ddestroy,
+ struct ttm_buffer_object, ddestroy);
kref_get(&nentry->list_kref);
}
- kref_get(&entry->list_kref);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_cleanup_refs(entry, remove_all);
kref_put(&entry->list_kref, ttm_bo_release_list);
+ entry = nentry;
+
+ if (ret || !entry)
+ goto out;
spin_lock(&glob->lru_lock);
- if (nentry) {
- bool next_onlist = !list_empty(next);
- spin_unlock(&glob->lru_lock);
- kref_put(&nentry->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- /*
- * Someone might have raced us and removed the
- * next entry from the list. We don't bother restarting
- * list traversal.
- */
-
- if (!next_onlist)
- break;
- }
- if (ret)
+ if (list_empty(&entry->ddestroy))
break;
}
- ret = !list_empty(&bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+out_unlock:
+ spin_unlock(&glob->lru_lock);
+out:
+ if (entry)
+ kref_put(&entry->list_kref, ttm_bo_release_list);
return ret;
}
@@ -684,19 +677,45 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo;
int ret, put_count = 0;
+retry:
spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
+ return -EBUSY;
+ }
+
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(bo);
+
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+ if (unlikely(ret == -EBUSY)) {
+ spin_unlock(&glob->lru_lock);
+ if (likely(!no_wait))
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+
+ /**
+ * We *need* to retry after releasing the lru lock.
+ */
+
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- if (unlikely(ret != 0))
- return ret;
+
+ BUG_ON(ret != 0);
+
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
ret = ttm_bo_evict(bo, interruptible, no_wait);
ttm_bo_unreserve(bo);
+
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
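The reworked ttm_mem_evict_first() above follows a reserve-or-retry pattern: take the first buffer on the LRU under the lru lock, attempt a non-blocking reserve, and on -EBUSY drop the lock, wait (unless no_wait) for the buffer to become unreserved, and restart so the list is re-read under the lock. A condensed sketch of that control flow, with lock_lru(), try_reserve() and wait_unreserved() as hypothetical stand-ins rather than TTM API:

for (;;) {
	lock_lru();
	if (lru_empty()) {
		unlock_lru();
		return -EBUSY;          /* nothing left to evict */
	}
	bo = first_on_lru();            /* take a list reference */
	ret = try_reserve(bo);          /* non-blocking */
	if (ret != -EBUSY)
		break;                  /* reserved; still under the lru lock */
	unlock_lru();                   /* never sleep with the lock held */
	ret = wait_unreserved(bo);      /* may fail when interruptible */
	drop_ref(bo);
	if (ret)
		return ret;
	/* the LRU may have changed while unlocked: retry from the top */
}
/* ... delist bo under the lock, then evict, unreserve, drop the ref ... */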
@@ -849,7 +868,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
int i, ret;
mem->mm_node = NULL;
- for (i = 0; i <= placement->num_placement; ++i) {
+ for (i = 0; i < placement->num_placement; ++i) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
@@ -900,8 +919,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!type_found)
return -EINVAL;
- for (i = 0; i <= placement->num_busy_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->placement[i],
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
&mem_type);
if (ret)
return ret;
@@ -911,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user,
mem_type,
- placement->placement[i],
+ placement->busy_placement[i],
&cur_flags))
continue;
@@ -921,9 +940,17 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, placement->placement[i],
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ mem->mm_node = NULL;
+ return 0;
+ }
+
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
@@ -993,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
+ struct drm_mm_node *node = mem->mm_node;
+
+ if (node && placement->lpfn != 0 &&
+ (node->start < placement->fpfn ||
+ node->start + node->size > placement->lpfn))
+ return -1;
for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
@@ -1115,6 +1148,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
+ bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
@@ -1817,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* anyone tries to access a ttm page.
*/
+ if (bo->bdev->driver->swap_notify)
+ bo->bdev->driver->swap_notify(bo);
+
ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:
@@ -1837,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
;
}
+EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f..5ca37a58a98 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
- save_flags = old_mem->placement;
}
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
void *old_iomap;
void *new_iomap;
int ret;
- uint32_t save_flags = old_mem->placement;
unsigned long i;
unsigned long page;
unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- uint32_t save_flags = old_mem->placement;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 609a85a4d85..668dbe8b8dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
return -EFAULT;
driver = bo->bdev->driver;
- if (unlikely(driver->verify_access)) {
+ if (unlikely(!driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4e..3d172ef04ee 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
+EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
{
wait_event(lock->queue, __ttm_suspend_lock(lock));
}
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824..75e9d6f86ba 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
- struct ttm_base_object *obj;
enum ttm_ref_type ref_type;
+ struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba..3d47a2c1232 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,23 +196,34 @@ EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_state)
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
+ int ret = 0;
+
if (PageHighMem(p))
return 0;
- switch (c_state) {
- case tt_cached:
- return set_pages_wb(p, 1);
- case tt_wc:
- return set_memory_wc((unsigned long) page_address(p), 1);
- default:
- return set_pages_uc(p, 1);
+ if (c_old != tt_cached) {
+ /* p isn't in the default caching state; set it to
+ * writeback first to free its current memtype. */
+
+ ret = set_pages_wb(p, 1);
+ if (ret)
+ return ret;
}
+
+ if (c_new == tt_wc)
+ ret = set_memory_wc((unsigned long) page_address(p), 1);
+ else if (c_new == tt_uncached)
+ ret = set_pages_uc(p, 1);
+
+ return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_state)
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
return 0;
}
@@ -245,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page, c_state);
+ ret = ttm_tt_set_page_caching(cur_page,
+ ttm->caching_state,
+ c_state);
if (unlikely(ret != 0))
goto out_err;
}
@@ -259,7 +272,7 @@ out_err:
for (j = 0; j < i; ++j) {
cur_page = ttm->pages[j];
if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page,
+ (void)ttm_tt_set_page_caching(cur_page, c_state,
ttm->caching_state);
}
}
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index bc2f5184300..7a1b210401e 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -58,7 +58,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 00000000000..f20b8bcbef3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
+config DRM_VMWGFX
+ tristate "DRM driver for VMware Virtual GPU"
+ depends on DRM && PCI
+ select FB_DEFERRED_IO
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select DRM_TTM
+ help
+ KMS-enabled DRM driver for SVGA2 virtual hardware.
+
+ If unsure, say N. The compiled module will be
+ called vmwgfx.ko
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 00000000000..1a3cb6816d1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@
+
+ccflags-y := -Iinclude/drm
+
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_overlay.o
+
+obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 00000000000..77cb4533100
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1793 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ * SVGA 3D hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#include "svga_reg.h"
+
+
+/*
+ * 3D Hardware Version
+ *
+ * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ * register. Is set by the host and read by the guest. This lets
+ * us make new guest drivers which are backwards-compatible with old
+ * SVGA hardware revisions. It does not let us support old guest
+ * drivers. Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
+
+typedef enum {
+ SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
+ SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
+ SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
+ SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
+ SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+ SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
+} SVGA3dHardwareVersion;
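/*
 * A worked example of the packing above (a sketch, not part of the
 * patch): the major revision occupies the upper 16 bits and the minor
 * the low byte, so SVGA3D_MAKE_HWVERSION(2, 0), i.e.
 * SVGA3D_HWVERSION_WS65_B1, evaluates to 0x00020000, and
 * SVGA3D_MAJOR_HWVERSION(0x00020000) == 2 while
 * SVGA3D_MINOR_HWVERSION(0x00020000) == 0.
 */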
+
+/*
+ * Generic Types
+ */
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
+
+
+/*
+ * Surface formats.
+ *
+ * If you modify this list, be sure to keep GLUtil.c in sync. It
+ * includes the internal format definition of each surface in
+ * GLUtil_ConvertSurfaceFormat, and it contains a table of
+ * human-readable names in GLUtil_GetFormatName.
+ */
+
+typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_INVALID = 0,
+
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_A8R8G8B8 = 2,
+
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
+
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
+
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
+
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
+
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
+
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+
+ SVGA3D_A2R10G10B10 = 26,
+
+ /* signed formats */
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
+
+ /* mixed formats */
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
+
+ SVGA3D_ALPHA8 = 32,
+
+ /* Single- and dual-component floating point formats */
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
+
+ /*
+ * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
+ * the most efficient format to use when creating new surfaces
+ * expressly for index or vertex data.
+ */
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
+
+ SVGA3D_V16U16 = 39,
+
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
+
+ /* Packed Video formats */
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+/*
+ * These match the D3DFORMAT_OP definitions used by Direct3D. We need
+ * them so that we can query the host for what the supported surface
+ * operations are (when we're using the D3D backend, in particular),
+ * and so we can send those operations to the guest.
+ */
+typedef enum {
+ SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
+ SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
+ SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
+ SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * has the same depth once the alpha channel is ignored. E.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip). This flag
+ * should not be set on alpha formats.
+ */
+ SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support in this format,
+ * which implies that the driver can create a context in this mode (for some
+ * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+ SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+ SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ */
+ SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+ SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+ SVGA3DFORMAT_OP_DMAP = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+ SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target (meaning that
+ * the pixel pipe will de-linearize data on output to the format)
+ */
+ SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+ SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+ SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate wrap
+ * modes, nor mipmapping
+ */
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
+} SVGA3dFormatOp;
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same position.
+ */
+typedef union {
+ uint32 value;
+ struct {
+ uint32 texture : 1;
+ uint32 volumeTexture : 1;
+ uint32 cubeTexture : 1;
+ uint32 offscreenRenderTarget : 1;
+ uint32 sameFormatRenderTarget : 1;
+ uint32 unknown1 : 1;
+ uint32 zStencil : 1;
+ uint32 zStencilArbitraryDepth : 1;
+ uint32 sameFormatUpToAlpha : 1;
+ uint32 unknown2 : 1;
+ uint32 displayMode : 1;
+ uint32 acceleration3d : 1;
+ uint32 pixelSize : 1;
+ uint32 convertToARGB : 1;
+ uint32 offscreenPlain : 1;
+ uint32 sRGBRead : 1;
+ uint32 bumpMap : 1;
+ uint32 dmap : 1;
+ uint32 noFilter : 1;
+ uint32 memberOfGroupARGB : 1;
+ uint32 sRGBWrite : 1;
+ uint32 noAlphaBlend : 1;
+ uint32 autoGenMipMap : 1;
+ uint32 vertexTexture : 1;
+ uint32 noTexCoordWrapNorMip : 1;
+ };
+} SVGA3dSurfaceFormatCaps;
+
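/*
 * A consequence of the position requirement above (a sketch, assuming
 * the usual LSB-first bitfield layout): a caps word can be tested
 * either through the flag constants or through the bitfield view.
 *
 *   SVGA3dSurfaceFormatCaps caps;
 *   caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_CUBETEXTURE;
 *   // caps.texture == 1 and caps.cubeTexture == 1, exactly as
 *   // (caps.value & SVGA3DFORMAT_OP_TEXTURE) != 0 would report
 */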
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_RS_INVALID = 0,
+ SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
+ SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
+ SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
+ SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
+ SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
+ SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
+ SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
+ SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
+ SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
+ SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
+ SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
+ SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
+ SVGA3D_RS_STENCILREF = 13, /* uint32 */
+ SVGA3D_RS_STENCILMASK = 14, /* uint32 */
+ SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
+ SVGA3D_RS_FOGSTART = 16, /* float */
+ SVGA3D_RS_FOGEND = 17, /* float */
+ SVGA3D_RS_FOGDENSITY = 18, /* float */
+ SVGA3D_RS_POINTSIZE = 19, /* float */
+ SVGA3D_RS_POINTSIZEMIN = 20, /* float */
+ SVGA3D_RS_POINTSIZEMAX = 21, /* float */
+ SVGA3D_RS_POINTSCALE_A = 22, /* float */
+ SVGA3D_RS_POINTSCALE_B = 23, /* float */
+ SVGA3D_RS_POINTSCALE_C = 24, /* float */
+ SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
+ SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
+ SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
+ SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
+ SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
+ SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
+ SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
+ SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
+ SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
+ SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
+ SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
+ SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
+ SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
+ SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
+ SVGA3D_RS_ZBIAS = 45, /* float */
+ SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
+ SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
+ SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
+ SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
+ SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
+ SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
+ SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
+ SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
+ SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
+ SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
+ SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
+ SVGA3D_RS_DEPTHBIAS = 64, /* float */
+
+
+ /*
+ * Output Gamma Level
+ *
+ * Output gamma affects the gamma curve of colors that are output from the
+ * rendering pipeline. A value of 1.0 specifies a linear color space. If the
+ * value is <= 0.0, gamma correction is ignored and linear color space is
+ * used.
+ */
+
+ SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
+ SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
+ SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
+ SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
+ SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
+ SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
+ SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
+ SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
+ SVGA3D_RS_TWEENFACTOR = 88, /* float */
+ SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
+ SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
+ SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+ SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
+ SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
+ SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
+} SVGA3dVertexMaterial;
+
+typedef enum {
+ SVGA3D_FILLMODE_INVALID = 0,
+ SVGA3D_FILLMODE_POINT = 1,
+ SVGA3D_FILLMODE_LINE = 2,
+ SVGA3D_FILLMODE_FILL = 3,
+ SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+union {
+ struct {
+ uint16 mode; /* SVGA3dFillModeType */
+ uint16 face; /* SVGA3dFace */
+ };
+ uint32 uintValue;
+} SVGA3dFillMode;
+
+typedef enum {
+ SVGA3D_SHADEMODE_INVALID = 0,
+ SVGA3D_SHADEMODE_FLAT = 1,
+ SVGA3D_SHADEMODE_SMOOTH = 2,
+ SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
+ SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+union {
+ struct {
+ uint16 repeat;
+ uint16 pattern;
+ };
+ uint32 uintValue;
+} SVGA3dLinePattern;
+
+typedef enum {
+ SVGA3D_BLENDOP_INVALID = 0,
+ SVGA3D_BLENDOP_ZERO = 1,
+ SVGA3D_BLENDOP_ONE = 2,
+ SVGA3D_BLENDOP_SRCCOLOR = 3,
+ SVGA3D_BLENDOP_INVSRCCOLOR = 4,
+ SVGA3D_BLENDOP_SRCALPHA = 5,
+ SVGA3D_BLENDOP_INVSRCALPHA = 6,
+ SVGA3D_BLENDOP_DESTALPHA = 7,
+ SVGA3D_BLENDOP_INVDESTALPHA = 8,
+ SVGA3D_BLENDOP_DESTCOLOR = 9,
+ SVGA3D_BLENDOP_INVDESTCOLOR = 10,
+ SVGA3D_BLENDOP_SRCALPHASAT = 11,
+ SVGA3D_BLENDOP_BLENDFACTOR = 12,
+ SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
+ SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+ SVGA3D_BLENDEQ_INVALID = 0,
+ SVGA3D_BLENDEQ_ADD = 1,
+ SVGA3D_BLENDEQ_SUBTRACT = 2,
+ SVGA3D_BLENDEQ_REVSUBTRACT = 3,
+ SVGA3D_BLENDEQ_MINIMUM = 4,
+ SVGA3D_BLENDEQ_MAXIMUM = 5,
+ SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+ SVGA3D_FRONTWINDING_INVALID = 0,
+ SVGA3D_FRONTWINDING_CW = 1,
+ SVGA3D_FRONTWINDING_CCW = 2,
+ SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+ SVGA3D_FACE_INVALID = 0,
+ SVGA3D_FACE_NONE = 1,
+ SVGA3D_FACE_FRONT = 2,
+ SVGA3D_FACE_BACK = 3,
+ SVGA3D_FACE_FRONT_BACK = 4,
+ SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+ SVGA3D_CMP_INVALID = 0,
+ SVGA3D_CMP_NEVER = 1,
+ SVGA3D_CMP_LESS = 2,
+ SVGA3D_CMP_EQUAL = 3,
+ SVGA3D_CMP_LESSEQUAL = 4,
+ SVGA3D_CMP_GREATER = 5,
+ SVGA3D_CMP_NOTEQUAL = 6,
+ SVGA3D_CMP_GREATEREQUAL = 7,
+ SVGA3D_CMP_ALWAYS = 8,
+ SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+ SVGA3D_FOGFUNC_INVALID = 0,
+ SVGA3D_FOGFUNC_EXP = 1,
+ SVGA3D_FOGFUNC_EXP2 = 2,
+ SVGA3D_FOGFUNC_LINEAR = 3,
+ SVGA3D_FOGFUNC_PER_VERTEX = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+ SVGA3D_FOGTYPE_INVALID = 0,
+ SVGA3D_FOGTYPE_VERTEX = 1,
+ SVGA3D_FOGTYPE_PIXEL = 2,
+ SVGA3D_FOGTYPE_MAX = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+ SVGA3D_FOGBASE_INVALID = 0,
+ SVGA3D_FOGBASE_DEPTHBASED = 1,
+ SVGA3D_FOGBASE_RANGEBASED = 2,
+ SVGA3D_FOGBASE_MAX = 3
+} SVGA3dFogBase;
+
+typedef enum {
+ SVGA3D_STENCILOP_INVALID = 0,
+ SVGA3D_STENCILOP_KEEP = 1,
+ SVGA3D_STENCILOP_ZERO = 2,
+ SVGA3D_STENCILOP_REPLACE = 3,
+ SVGA3D_STENCILOP_INCRSAT = 4,
+ SVGA3D_STENCILOP_DECRSAT = 5,
+ SVGA3D_STENCILOP_INVERT = 6,
+ SVGA3D_STENCILOP_INCR = 7,
+ SVGA3D_STENCILOP_DECR = 8,
+ SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+ SVGA3D_CLIPPLANE_0 = (1 << 0),
+ SVGA3D_CLIPPLANE_1 = (1 << 1),
+ SVGA3D_CLIPPLANE_2 = (1 << 2),
+ SVGA3D_CLIPPLANE_3 = (1 << 3),
+ SVGA3D_CLIPPLANE_4 = (1 << 4),
+ SVGA3D_CLIPPLANE_5 = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+ SVGA3D_CLEAR_COLOR = 0x1,
+ SVGA3D_CLEAR_DEPTH = 0x2,
+ SVGA3D_CLEAR_STENCIL = 0x4
+} SVGA3dClearFlag;
+
+typedef enum {
+ SVGA3D_RT_DEPTH = 0,
+ SVGA3D_RT_STENCIL = 1,
+ SVGA3D_RT_COLOR0 = 2,
+ SVGA3D_RT_COLOR1 = 3,
+ SVGA3D_RT_COLOR2 = 4,
+ SVGA3D_RT_COLOR3 = 5,
+ SVGA3D_RT_COLOR4 = 6,
+ SVGA3D_RT_COLOR5 = 7,
+ SVGA3D_RT_COLOR6 = 8,
+ SVGA3D_RT_COLOR7 = 9,
+ SVGA3D_RT_MAX,
+ SVGA3D_RT_INVALID = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+union {
+ struct {
+ uint32 red : 1;
+ uint32 green : 1;
+ uint32 blue : 1;
+ uint32 alpha : 1;
+ };
+ uint32 uintValue;
+} SVGA3dColorMask;
+
+typedef enum {
+ SVGA3D_VBLEND_DISABLE = 0,
+ SVGA3D_VBLEND_1WEIGHT = 1,
+ SVGA3D_VBLEND_2WEIGHT = 2,
+ SVGA3D_VBLEND_3WEIGHT = 3,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+ SVGA3D_WRAPCOORD_0 = 1 << 0,
+ SVGA3D_WRAPCOORD_1 = 1 << 1,
+ SVGA3D_WRAPCOORD_2 = 1 << 2,
+ SVGA3D_WRAPCOORD_3 = 1 << 3,
+ SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_TS_INVALID = 0,
+ SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
+ SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
+ SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
+ SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
+ SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
+ SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
+ SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
+ SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
+ SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
+ SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
+ SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
+ SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
+ SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
+ SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
+ SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
+ SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
+ SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
+
+
+ /*
+ * Sampler Gamma Level
+ *
+ * Sampler gamma affects the color of samples taken from the sampler. A
+ * value of 1.0 produces linear samples. If the value is <= 0.0, the
+ * gamma value is ignored and a linear space is used.
+ */
+
+ SVGA3D_TS_GAMMA = 25, /* float */
+ SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
+ SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
+ SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
+ SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+ SVGA3D_TC_INVALID = 0,
+ SVGA3D_TC_DISABLE = 1,
+ SVGA3D_TC_SELECTARG1 = 2,
+ SVGA3D_TC_SELECTARG2 = 3,
+ SVGA3D_TC_MODULATE = 4,
+ SVGA3D_TC_ADD = 5,
+ SVGA3D_TC_ADDSIGNED = 6,
+ SVGA3D_TC_SUBTRACT = 7,
+ SVGA3D_TC_BLENDTEXTUREALPHA = 8,
+ SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
+ SVGA3D_TC_BLENDCURRENTALPHA = 10,
+ SVGA3D_TC_BLENDFACTORALPHA = 11,
+ SVGA3D_TC_MODULATE2X = 12,
+ SVGA3D_TC_MODULATE4X = 13,
+ SVGA3D_TC_DSDT = 14,
+ SVGA3D_TC_DOTPRODUCT3 = 15,
+ SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
+ SVGA3D_TC_ADDSIGNED2X = 17,
+ SVGA3D_TC_ADDSMOOTH = 18,
+ SVGA3D_TC_PREMODULATE = 19,
+ SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
+ SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
+ SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+ SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+ SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
+ SVGA3D_TC_MULTIPLYADD = 25,
+ SVGA3D_TC_LERP = 26,
+ SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
+
+typedef enum {
+ SVGA3D_TEX_ADDRESS_INVALID = 0,
+ SVGA3D_TEX_ADDRESS_WRAP = 1,
+ SVGA3D_TEX_ADDRESS_MIRROR = 2,
+ SVGA3D_TEX_ADDRESS_CLAMP = 3,
+ SVGA3D_TEX_ADDRESS_BORDER = 4,
+ SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+ SVGA3D_TEX_ADDRESS_EDGE = 6,
+ SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+ SVGA3D_TEX_FILTER_NONE = 0,
+ SVGA3D_TEX_FILTER_NEAREST = 1,
+ SVGA3D_TEX_FILTER_LINEAR = 2,
+ SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
+ SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+ SVGA3D_TEX_TRANSFORM_OFF = 0,
+ SVGA3D_TEX_TRANSFORM_S = (1 << 0),
+ SVGA3D_TEX_TRANSFORM_T = (1 << 1),
+ SVGA3D_TEX_TRANSFORM_R = (1 << 2),
+ SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
+ SVGA3D_TEX_PROJECTED = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+ SVGA3D_TEXCOORD_GEN_OFF = 0,
+ SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
+ SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
+ SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+ SVGA3D_TEXCOORD_GEN_SPHERE = 4,
+ SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+ SVGA3D_TA_INVALID = 0,
+ SVGA3D_TA_CONSTANT = 1,
+ SVGA3D_TA_PREVIOUS = 2,
+ SVGA3D_TA_DIFFUSE = 3,
+ SVGA3D_TA_TEXTURE = 4,
+ SVGA3D_TA_SPECULAR = 5,
+ SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+ SVGA3D_TM_NONE = 0,
+ SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
+ SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
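+
+/*
+ * Illustrative sketch (an assumption, not spelled out above): a
+ * COLORARG/ALPHAARG texture-state value packs an SVGA3dTextureArgData
+ * selector in the low SVGA3D_TM_MASK_LEN bits, with an optional
+ * modifier OR'ed in above it. For example:
+ *
+ *   SVGA3D_TA_TEXTURE | SVGA3D_TM_ALPHA      // the texture's alpha
+ *   SVGA3D_TA_DIFFUSE | SVGA3D_TM_ONE_MINUS  // 1 - diffuse
+ */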
+
+#define SVGA3D_INVALID_ID ((uint32)-1)
+#define SVGA3D_MAX_CLIP_PLANES 6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+ SVGA3D_DECLUSAGE_POSITION = 0,
+ SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
+ SVGA3D_DECLUSAGE_BLENDINDICES, // 2
+ SVGA3D_DECLUSAGE_NORMAL, // 3
+ SVGA3D_DECLUSAGE_PSIZE, // 4
+ SVGA3D_DECLUSAGE_TEXCOORD, // 5
+ SVGA3D_DECLUSAGE_TANGENT, // 6
+ SVGA3D_DECLUSAGE_BINORMAL, // 7
+ SVGA3D_DECLUSAGE_TESSFACTOR, // 8
+ SVGA3D_DECLUSAGE_POSITIONT, // 9
+ SVGA3D_DECLUSAGE_COLOR, // 10
+ SVGA3D_DECLUSAGE_FOG, // 11
+ SVGA3D_DECLUSAGE_DEPTH, // 12
+ SVGA3D_DECLUSAGE_SAMPLE, // 13
+ SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+ SVGA3D_DECLMETHOD_DEFAULT = 0,
+ SVGA3D_DECLMETHOD_PARTIALU,
+ SVGA3D_DECLMETHOD_PARTIALV,
+ SVGA3D_DECLMETHOD_CROSSUV, // Normal
+ SVGA3D_DECLMETHOD_UV,
+ SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+} SVGA3dDeclMethod;
+
+typedef enum {
+ SVGA3D_DECLTYPE_FLOAT1 = 0,
+ SVGA3D_DECLTYPE_FLOAT2 = 1,
+ SVGA3D_DECLTYPE_FLOAT3 = 2,
+ SVGA3D_DECLTYPE_FLOAT4 = 3,
+ SVGA3D_DECLTYPE_D3DCOLOR = 4,
+ SVGA3D_DECLTYPE_UBYTE4 = 5,
+ SVGA3D_DECLTYPE_SHORT2 = 6,
+ SVGA3D_DECLTYPE_SHORT4 = 7,
+ SVGA3D_DECLTYPE_UBYTE4N = 8,
+ SVGA3D_DECLTYPE_SHORT2N = 9,
+ SVGA3D_DECLTYPE_SHORT4N = 10,
+ SVGA3D_DECLTYPE_USHORT2N = 11,
+ SVGA3D_DECLTYPE_USHORT4N = 12,
+ SVGA3D_DECLTYPE_UDEC3 = 13,
+ SVGA3D_DECLTYPE_DEC3N = 14,
+ SVGA3D_DECLTYPE_FLOAT16_2 = 15,
+ SVGA3D_DECLTYPE_FLOAT16_4 = 16,
+ SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+ struct {
+ /*
+ * For index data, this number represents the number of instances to draw.
+ * For instance data, this number represents the number of
+ * instances/vertex in this stream
+ */
+ uint32 count : 30;
+
+ /*
+ * This is 1 if this is supposed to be the data that is repeated for
+ * every instance.
+ */
+ uint32 indexedData : 1;
+
+ /*
+ * This is 1 if this is supposed to be the per-instance data.
+ */
+ uint32 instanceData : 1;
+ };
+
+ uint32 value;
+} SVGA3dVertexDivisor;
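+
+/*
+ * Illustrative sketch, not part of the device protocol: pack a divisor
+ * for an instance-data stream, i.e. 'count' instances per vertex in
+ * this stream, per the field descriptions above.
+ */
+static inline SVGA3dVertexDivisor
+SVGA3dMakeInstanceDivisor(uint32 count)
+{
+   SVGA3dVertexDivisor divisor;
+
+   divisor.value = 0;        /* clear all bits, including flags */
+   divisor.count = count;    /* low 30 bits */
+   divisor.instanceData = 1; /* this stream holds per-instance data */
+   return divisor;
+}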
+
+typedef enum {
+ SVGA3D_PRIMITIVE_INVALID = 0,
+ SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
+ SVGA3D_PRIMITIVE_POINTLIST = 2,
+ SVGA3D_PRIMITIVE_LINELIST = 3,
+ SVGA3D_PRIMITIVE_LINESTRIP = 4,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
+ SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
+ SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+ SVGA3D_COORDINATE_INVALID = 0,
+ SVGA3D_COORDINATE_LEFTHANDED = 1,
+ SVGA3D_COORDINATE_RIGHTHANDED = 2,
+ SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+ SVGA3D_TRANSFORM_INVALID = 0,
+ SVGA3D_TRANSFORM_WORLD = 1,
+ SVGA3D_TRANSFORM_VIEW = 2,
+ SVGA3D_TRANSFORM_PROJECTION = 3,
+ SVGA3D_TRANSFORM_TEXTURE0 = 4,
+ SVGA3D_TRANSFORM_TEXTURE1 = 5,
+ SVGA3D_TRANSFORM_TEXTURE2 = 6,
+ SVGA3D_TRANSFORM_TEXTURE3 = 7,
+ SVGA3D_TRANSFORM_TEXTURE4 = 8,
+ SVGA3D_TRANSFORM_TEXTURE5 = 9,
+ SVGA3D_TRANSFORM_TEXTURE6 = 10,
+ SVGA3D_TRANSFORM_TEXTURE7 = 11,
+ SVGA3D_TRANSFORM_WORLD1 = 12,
+ SVGA3D_TRANSFORM_WORLD2 = 13,
+ SVGA3D_TRANSFORM_WORLD3 = 14,
+ SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+ SVGA3D_LIGHTTYPE_INVALID = 0,
+ SVGA3D_LIGHTTYPE_POINT = 1,
+ SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
+ SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
+ SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
+ SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+ SVGA3D_CUBEFACE_POSX = 0,
+ SVGA3D_CUBEFACE_NEGX = 1,
+ SVGA3D_CUBEFACE_POSY = 2,
+ SVGA3D_CUBEFACE_NEGY = 3,
+ SVGA3D_CUBEFACE_POSZ = 4,
+ SVGA3D_CUBEFACE_NEGZ = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+ SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
+ SVGA3D_SHADERTYPE_VS = 1,
+ SVGA3D_SHADERTYPE_PS = 2,
+ SVGA3D_SHADERTYPE_MAX
+} SVGA3dShaderType;
+
+typedef enum {
+ SVGA3D_CONST_TYPE_FLOAT = 0,
+ SVGA3D_CONST_TYPE_INT = 1,
+ SVGA3D_CONST_TYPE_BOOL = 2,
+} SVGA3dShaderConstType;
+
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+typedef enum {
+ SVGA3D_STRETCH_BLT_POINT = 0,
+ SVGA3D_STRETCH_BLT_LINEAR = 1,
+ SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+ SVGA3D_QUERYTYPE_OCCLUSION = 0,
+ SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef enum {
+ SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
+ SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
+ SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
+ SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
+} SVGA3dQueryState;
+
+typedef enum {
+ SVGA3D_WRITE_HOST_VRAM = 1,
+ SVGA3D_READ_HOST_VRAM = 2,
+} SVGA3dTransferType;
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS 32
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+#define SVGA_3D_CMD_LEGACY_BASE 1000
+#define SVGA_3D_CMD_BASE 1040
+
+#define SVGA_3D_CMD_SURFACE_DEFINE (SVGA_3D_CMD_BASE + 0)
+#define SVGA_3D_CMD_SURFACE_DESTROY (SVGA_3D_CMD_BASE + 1)
+#define SVGA_3D_CMD_SURFACE_COPY (SVGA_3D_CMD_BASE + 2)
+#define SVGA_3D_CMD_SURFACE_STRETCHBLT (SVGA_3D_CMD_BASE + 3)
+#define SVGA_3D_CMD_SURFACE_DMA (SVGA_3D_CMD_BASE + 4)
+#define SVGA_3D_CMD_CONTEXT_DEFINE (SVGA_3D_CMD_BASE + 5)
+#define SVGA_3D_CMD_CONTEXT_DESTROY (SVGA_3D_CMD_BASE + 6)
+#define SVGA_3D_CMD_SETTRANSFORM (SVGA_3D_CMD_BASE + 7)
+#define SVGA_3D_CMD_SETZRANGE (SVGA_3D_CMD_BASE + 8)
+#define SVGA_3D_CMD_SETRENDERSTATE (SVGA_3D_CMD_BASE + 9)
+#define SVGA_3D_CMD_SETRENDERTARGET (SVGA_3D_CMD_BASE + 10)
+#define SVGA_3D_CMD_SETTEXTURESTATE (SVGA_3D_CMD_BASE + 11)
+#define SVGA_3D_CMD_SETMATERIAL (SVGA_3D_CMD_BASE + 12)
+#define SVGA_3D_CMD_SETLIGHTDATA (SVGA_3D_CMD_BASE + 13)
+#define SVGA_3D_CMD_SETLIGHTENABLED (SVGA_3D_CMD_BASE + 14)
+#define SVGA_3D_CMD_SETVIEWPORT (SVGA_3D_CMD_BASE + 15)
+#define SVGA_3D_CMD_SETCLIPPLANE (SVGA_3D_CMD_BASE + 16)
+#define SVGA_3D_CMD_CLEAR (SVGA_3D_CMD_BASE + 17)
+#define SVGA_3D_CMD_PRESENT (SVGA_3D_CMD_BASE + 18) // Deprecated
+#define SVGA_3D_CMD_SHADER_DEFINE (SVGA_3D_CMD_BASE + 19)
+#define SVGA_3D_CMD_SHADER_DESTROY (SVGA_3D_CMD_BASE + 20)
+#define SVGA_3D_CMD_SET_SHADER (SVGA_3D_CMD_BASE + 21)
+#define SVGA_3D_CMD_SET_SHADER_CONST (SVGA_3D_CMD_BASE + 22)
+#define SVGA_3D_CMD_DRAW_PRIMITIVES (SVGA_3D_CMD_BASE + 23)
+#define SVGA_3D_CMD_SETSCISSORRECT (SVGA_3D_CMD_BASE + 24)
+#define SVGA_3D_CMD_BEGIN_QUERY (SVGA_3D_CMD_BASE + 25)
+#define SVGA_3D_CMD_END_QUERY (SVGA_3D_CMD_BASE + 26)
+#define SVGA_3D_CMD_WAIT_FOR_QUERY (SVGA_3D_CMD_BASE + 27)
+#define SVGA_3D_CMD_PRESENT_READBACK (SVGA_3D_CMD_BASE + 28) // Deprecated
+#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN (SVGA_3D_CMD_BASE + 29)
+#define SVGA_3D_CMD_MAX (SVGA_3D_CMD_BASE + 30)
+
+#define SVGA_3D_CMD_FUTURE_MAX 2000
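+
+/*
+ * Illustrative sketch (an assumption, not part of the protocol): test
+ * whether a FIFO command ID falls in the current SVGA3D range defined
+ * above.
+ */
+static inline uint32
+SVGA3dIsCmdIdValid(uint32 id)
+{
+   return id >= SVGA_3D_CMD_BASE && id < SVGA_3D_CMD_MAX;
+}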
+
+/*
+ * Common substructures used in multiple FIFO commands:
+ */
+
+typedef struct {
+ union {
+ struct {
+ uint16 function; // SVGA3dFogFunction
+ uint8 type; // SVGA3dFogType
+ uint8 base; // SVGA3dFogBase
+ };
+ uint32 uintValue;
+ };
+} SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+struct SVGA3dSurfaceImageId {
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
+} SVGA3dSurfaceImageId;
+
+typedef
+struct SVGA3dGuestImage {
+ SVGAGuestPtr ptr;
+
+ /*
+ * A note on interpretation of pitch: This value of pitch is the
+ * number of bytes between vertically adjacent image
+ * blocks. Normally this is the number of bytes between the first
+ * pixel of two adjacent scanlines. With compressed textures,
+ * however, this may represent the number of bytes between
+ * compression blocks rather than between rows of pixels.
+ *
+ * XXX: Compressed textures currently must be tightly packed in guest memory.
+ *
+ * If the image is 1-dimensional, pitch is ignored.
+ *
+ * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+ * assuming each row of blocks is tightly packed.
+ */
+ uint32 pitch;
+} SVGA3dGuestImage;
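+
+/*
+ * Illustrative sketch (an assumption): the effective pitch of an
+ * uncompressed 2D image, honoring the "zero means tightly packed" rule
+ * described above.
+ */
+static inline uint32
+SVGA3dGuestImageEffectivePitch(const SVGA3dGuestImage *image,
+                               uint32 width, uint32 bytesPerPixel)
+{
+   return image->pitch ? image->pitch : width * bytesPerPixel;
+}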
+
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+struct {
+ uint32 id;
+ uint32 size;
+} SVGA3dCmdHeader;
+
+/*
+ * A surface is a hierarchy of host VRAM images: 1D, 2D, or 3D, with
+ * optional mipmaps and cube faces.
+ */
+
+typedef
+struct {
+ uint32 width;
+ uint32 height;
+ uint32 depth;
+} SVGA3dSize;
+
+typedef enum {
+ SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+ SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
+ SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
+ SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
+ SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
+ SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
+ SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+} SVGA3dSurfaceFlags;
+
+typedef
+struct {
+ uint32 numMipLevels;
+} SVGA3dSurfaceFace;
+
+typedef
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+struct {
+ uint32 sid;
+} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+struct {
+ uint32 cid;
+} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dClearFlag clearFlag;
+ uint32 color;
+ float depth;
+ uint32 stencil;
+ /* Followed by variable number of SVGA3dRect structures */
+} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
+
+typedef
+struct SVGA3dCopyRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+ uint32 srcx;
+ uint32 srcy;
+} SVGA3dCopyRect;
+
+typedef
+struct SVGA3dCopyBox {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+ uint32 srcx;
+ uint32 srcy;
+ uint32 srcz;
+} SVGA3dCopyBox;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+} SVGA3dRect;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+} SVGA3dBox;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+} SVGA3dPoint;
+
+typedef
+struct {
+ SVGA3dLightType type;
+ SVGA3dBool inWorldSpace;
+ float diffuse[4];
+ float specular[4];
+ float ambient[4];
+ float position[4];
+ float direction[4];
+ float range;
+ float falloff;
+ float attenuation0;
+ float attenuation1;
+ float attenuation2;
+ float theta;
+ float phi;
+} SVGA3dLightData;
+
+typedef
+struct {
+ uint32 sid;
+ /* Followed by variable number of SVGA3dCopyRect structures */
+} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
+
+typedef
+struct {
+ SVGA3dRenderStateName state;
+ union {
+ uint32 uintValue;
+ float floatValue;
+ };
+} SVGA3dRenderState;
+
+typedef
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dRenderState structures */
+} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRenderTargetType type;
+ SVGA3dSurfaceImageId target;
+} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dStretchBltMode mode;
+} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+struct {
+ /*
+ * If the discard flag is present in a surface DMA operation, the host may
+ * discard the contents of the current mipmap level and face of the target
+ * surface before applying the surface DMA contents.
+ */
+ uint32 discard : 1;
+
+ /*
+ * If the unsynchronized flag is present, the host may perform this upload
+ * without syncing to pending reads on this surface.
+ */
+ uint32 unsynchronized : 1;
+
+ /*
+ * Guests *MUST* set the reserved bits to 0 before submitting the command
+ * suffix, as future flags may occupy these bits.
+ */
+ uint32 reserved : 30;
+} SVGA3dSurfaceDMAFlags;
+
+typedef
+struct {
+ SVGA3dGuestImage guest;
+ SVGA3dSurfaceImageId host;
+ SVGA3dTransferType transfer;
+ /*
+ * Followed by variable number of SVGA3dCopyBox structures. For consistency
+ * in all clipping logic and coordinate translation, we define the
+ * "source" in each copyBox as the guest image and the
+ * "destination" as the host image, regardless of transfer
+ * direction.
+ *
+ * For efficiency, the SVGA3D device is free to copy more data than
+ * specified. For example, it may round copy boxes outwards such
+ * that they lie on particular alignment boundaries.
+ */
+} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ * This is a command suffix that will appear after a SurfaceDMA command in
+ * the FIFO. It contains some extra information that hosts may use to
+ * optimize performance or protect the guest. This suffix exists to preserve
+ * backwards compatibility while also allowing for new functionality to be
+ * implemented.
+ */
+
+typedef
+struct {
+ uint32 suffixSize;
+
+ /*
+ * 'maximumOffset' bounds the largest offset from the guestPtr base
+ * address that will be accessed or written during this surface DMA.
+ * If the suffix is supported, the host will respect this boundary
+ * while performing surface DMAs.
+ *
+ * Defaults to MAX_UINT32
+ */
+ uint32 maximumOffset;
+
+ /*
+ * A set of flags describing optimizations that the host may perform
+ * while executing this surface DMA operation. For correctness, the
+ * guest should never rely on behaviour that differs when these flags
+ * are set.
+ *
+ * Defaults to 0
+ */
+ SVGA3dSurfaceDMAFlags flags;
+} SVGA3dCmdSurfaceDMASuffix;
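+
+/*
+ * Illustrative sketch (an assumption, not part of the protocol):
+ * initialize a DMA suffix with the documented defaults -- no offset
+ * bound and no optimization flags.
+ */
+static inline void
+SVGA3dCmdSurfaceDMASuffixInit(SVGA3dCmdSurfaceDMASuffix *suffix)
+{
+   suffix->suffixSize = sizeof *suffix;
+   suffix->maximumOffset = 0xFFFFFFFF; /* MAX_UINT32: no bound */
+   suffix->flags.discard = 0;
+   suffix->flags.unsynchronized = 0;
+   suffix->flags.reserved = 0;         /* guests MUST zero reserved bits */
+}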
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ * This command is the SVGA3D device's generic drawing entry point.
+ * It can draw multiple ranges of primitives, optionally using an
+ * index buffer, using an arbitrary collection of vertex buffers.
+ *
+ * Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ * during this draw call. The declarations specify which surface
+ * the vertex data lives in, what that vertex data is used for,
+ * and how to interpret it.
+ *
+ * Each SVGA3dPrimitiveRange defines a collection of primitives
+ * to render using the same vertex arrays. An index buffer is
+ * optional.
+ */
+
+typedef
+struct {
+ /*
+ * A range hint is an optional specification for the range of indices
+ * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+ * that the entire array will be used.
+ *
+ * These are only hints. The SVGA3D device may use them for
+ * performance optimization if possible, but it's also allowed to
+ * ignore these values.
+ */
+ uint32 first;
+ uint32 last;
+} SVGA3dArrayRangeHint;
+
+typedef
+struct {
+ /*
+ * Define the origin and shape of a vertex or index array. Both
+ * 'offset' and 'stride' are in bytes. The provided surface will be
+ * reinterpreted as a flat array of bytes in the same format used
+ * by surface DMA operations. To avoid unnecessary conversions, the
+ * surface should be created with the SVGA3D_BUFFER format.
+ *
+ * Index 0 in the array starts 'offset' bytes into the surface.
+ * Index 1 begins at byte 'offset + stride', etc. Array indices may
+ * not be negative.
+ */
+ uint32 surfaceId;
+ uint32 offset;
+ uint32 stride;
+} SVGA3dArray;
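+
+/*
+ * Illustrative sketch: the byte offset of array element 'index' within
+ * the backing surface follows directly from the definition above.
+ */
+static inline uint32
+SVGA3dArrayByteOffset(const SVGA3dArray *array, uint32 index)
+{
+   return array->offset + index * array->stride;
+}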
+
+typedef
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ SVGA3dDeclType type;
+ SVGA3dDeclMethod method;
+ SVGA3dDeclUsage usage;
+ uint32 usageIndex;
+} SVGA3dVertexArrayIdentity;
+
+typedef
+struct {
+ SVGA3dVertexArrayIdentity identity;
+ SVGA3dArray array;
+ SVGA3dArrayRangeHint rangeHint;
+} SVGA3dVertexDecl;
+
+typedef
+struct {
+ /*
+ * Define a group of primitives to render, from sequential indices.
+ *
+ * The values of 'primType' and 'primitiveCount' imply the
+ * total number of vertices that will be rendered.
+ */
+ SVGA3dPrimitiveType primType;
+ uint32 primitiveCount;
+
+ /*
+ * Optional index buffer. If indexArray.surfaceId is
+ * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+ * without an index buffer is identical to rendering with an index
+ * buffer containing the sequence [0, 1, 2, 3, ...].
+ *
+ * If an index buffer is in use, indexWidth specifies the width in
+ * bytes of each index value. It must be less than or equal to
+ * indexArray.stride.
+ *
+ * (Currently, the SVGA3D device requires index buffers to be tightly
+ * packed. In other words, indexWidth == indexArray.stride)
+ */
+ SVGA3dArray indexArray;
+ uint32 indexWidth;
+
+ /*
+ * Optional index bias. This number is added to all indices from
+ * indexArray before they are used as vertex array indices. This
+ * can be used in multiple ways:
+ *
+ * - When not using an indexArray, this bias can be used to
+ * specify where in the vertex arrays to begin rendering.
+ *
+ * - A positive number here is equivalent to increasing the
+ * offset in each vertex array.
+ *
+ * - A negative number can be used to render using a small
+ * vertex array and an index buffer that contains large
+ * values. This may be used by some applications that
+ * crop a vertex buffer without modifying their index
+ * buffer.
+ *
+ * Note that rendering with a negative bias value may be slower and
+ * use more memory than rendering with a positive or zero bias.
+ */
+ int32 indexBias;
+} SVGA3dPrimitiveRange;
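+
+/*
+ * Illustrative sketch (an assumption): the vertex array element fetched
+ * for one value read from the index buffer, after applying the optional
+ * bias described above. A negative result is invalid.
+ */
+static inline int32
+SVGA3dBiasedVertexIndex(const SVGA3dPrimitiveRange *range, uint32 indexValue)
+{
+   return (int32)indexValue + range->indexBias;
+}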
+
+typedef
+struct {
+ uint32 cid;
+ uint32 numVertexDecls;
+ uint32 numRanges;
+
+ /*
+ * There are two variable size arrays after the
+ * SVGA3dCmdDrawPrimitives structure. In order,
+ * they are:
+ *
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+ * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+ * the frequency divisor for the corresponding vertex decl)
+ */
+} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAW_PRIMITIVES */
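+
+/*
+ * Illustrative sketch, not part of the protocol headers: the command
+ * body size implied by the two mandatory trailing arrays, without the
+ * optional SVGA3dVertexDivisor array.
+ */
+static inline uint32
+SVGA3dCmdDrawPrimitivesSize(uint32 numVertexDecls, uint32 numRanges)
+{
+   return sizeof(SVGA3dCmdDrawPrimitives) +
+          numVertexDecls * sizeof(SVGA3dVertexDecl) +
+          numRanges * sizeof(SVGA3dPrimitiveRange);
+}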
+
+typedef
+struct {
+ uint32 stage;
+ SVGA3dTextureStateName name;
+ union {
+ uint32 value;
+ float floatValue;
+ };
+} SVGA3dTextureState;
+
+typedef
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dTextureState structures */
+} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dTransformType type;
+ float matrix[16];
+} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+struct {
+ float min;
+ float max;
+} SVGA3dZRange;
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dZRange zRange;
+} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+struct {
+ float diffuse[4];
+ float ambient[4];
+ float specular[4];
+ float emissive[4];
+ float shininess;
+} SVGA3dMaterial;
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ SVGA3dLightData data;
+} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ uint32 enabled;
+} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ float plane[4];
+} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+ /* Followed by variable number of DWORDs for shader bytecode */
+} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 reg; /* register number */
+ SVGA3dShaderType type;
+ SVGA3dShaderConstType ctype;
+ uint32 values[4];
+} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dShaderType type;
+ uint32 shid;
+} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
+} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
+
+typedef
+struct {
+ uint32 cid; /* Same parameters passed to END_QUERY */
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult;
+} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+struct {
+ uint32 totalSize; /* Set by guest before query is ended. */
+ SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
+ union { /* Set by host on exit from PENDING state */
+ uint32 result32;
+ };
+} SVGA3dQueryResult;
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ * This is a blit from an SVGA3D surface to a Screen Object. Just
+ * like GMR-to-screen blits, this blit may be directed at a
+ * specific screen or to the virtual coordinate space.
+ *
+ * The blit copies from a rectangular region of an SVGA3D surface
+ * image to a rectangular region of a screen or screens.
+ *
+ * This command takes an optional variable-length list of clipping
+ * rectangles after the body of the command. If no rectangles are
+ * specified, there is no clipping region. The entire destRect is
+ * drawn to. If one or more rectangles are included, they describe
+ * a clipping region. The clip rectangle coordinates are measured
+ * relative to the top-left corner of destRect.
+ *
+ * This clipping region serves multiple purposes:
+ *
+ * - It can be used to perform an irregularly shaped blit more
+ * efficiently than by issuing many separate blit commands.
+ *
+ * - It is equivalent to allowing blits with non-integer
+ * source coordinates. You could blit just one half-pixel
+ * of a source, for example, by specifying a larger
+ * destination rectangle than you need, then removing
+ * part of it using a clip rectangle.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ *
+ * Limitations:
+ *
+ * - Currently, no backend supports blits from a mipmap or face
+ * other than the first one.
+ */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId srcImage;
+ SVGASignedRect srcRect;
+ uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
+ SVGASignedRect destRect; /* Supports scaling if src/dest are different sizes */
+ /* Clipping: zero or more SVGASignedRects follow */
+} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
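+
+/*
+ * Illustrative sketch, not part of the protocol headers: the command
+ * body size of a surface-to-screen blit followed by 'numClipRects'
+ * clipping rectangles.
+ */
+static inline uint32
+SVGA3dCmdBlitSurfaceToScreenSize(uint32 numClipRects)
+{
+   return sizeof(SVGA3dCmdBlitSurfaceToScreen) +
+          numClipRects * sizeof(SVGASignedRect);
+}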
+
+
+/*
+ * Capability query index.
+ *
+ * Notes:
+ *
+ * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+ * works in both FFP and shader modes, and supports texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ *
+ * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
+ * return TRUE. Even on physical hardware that does not support
+ * these formats natively, the SVGA3D device will provide an emulation
+ * which should be invisible to the guest OS.
+ *
+ * In general, the SVGA3D device should support any operation on
+ * any surface format; it may simply perform some of these
+ * operations in software, depending on the capabilities of the
+ * available physical hardware.
+ *
+ * XXX: In the future, we will add capabilities that describe in
+ * detail what formats are supported in hardware for what kinds
+ * of operations.
+ */
+
+typedef enum {
+ SVGA3D_DEVCAP_3D = 0,
+ SVGA3D_DEVCAP_MAX_LIGHTS = 1,
+ SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
+ SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
+ SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
+ SVGA3D_DEVCAP_VERTEX_SHADER = 5,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
+ SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
+ SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
+ SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
+ SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
+ SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
+ SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
+ SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
+ SVGA3D_DEVCAP_QUERY_TYPES = 15,
+ SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
+ SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
+ SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
+ SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
+ SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
+ SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
+ SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
+ SVGA3D_DEVCAP_TEXTURE_OPS = 31,
+ SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
+ SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
+ SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
+ SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
+ SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
+ SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
+ SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
+ SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
+ SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
+ SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
+ SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
+
+ /*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets. This does not include the depth or stencil targets.
+ */
+ SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
+
+ SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
+ SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
+ SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
+ SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
+ SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+
+ /*
+ * Don't add new caps into the previous section; the values in this
+ * enumeration must not change. You can put new values right before
+ * SVGA3D_DEVCAP_MAX.
+ */
+ SVGA3D_DEVCAP_MAX /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+typedef union {
+ Bool b;
+ uint32 u;
+ int32 i;
+ float f;
+} SVGA3dDevCapResult;
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 00000000000..7b85e9b8c85
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ * Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ * 0000: Reserved
+ * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
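+
+/*
+ * Illustrative sketch (an assumption; these helpers are not defined by
+ * the device): split an escape command DWORD into its major and minor
+ * halves per the guideline above.
+ */
+#define SVGA_ESCAPE_VMWARE_MAJOR(cmd) ((cmd) & SVGA_ESCAPE_VMWARE_MAJOR_MASK)
+#define SVGA_ESCAPE_VMWARE_MINOR(cmd) ((cmd) & 0x0000FFFF)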
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * the host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ * that use the SVGA Screen Object extension. Instead of sending
+ * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ * Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
+
+typedef
+struct {
+ uint32 command;
+ uint32 fullscreen;
+ struct {
+ int32 x, y;
+ } monitorPosition;
+} SVGAEscapeHintFullscreen;
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 00000000000..f753d73c14b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ * Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
+#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
+#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
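+
+/*
+ * Illustrative sketch (an assumption, not used by the definitions
+ * above): a FOURCC packs four ASCII bytes with the first character in
+ * the least significant byte, so VMWARE_MAKE_FOURCC('Y','V','1','2')
+ * yields 0x32315659.
+ */
+#define VMWARE_MAKE_FOURCC(a, b, c, d) \
+   ((uint32)(a) | ((uint32)(b) << 8) | ((uint32)(c) << 16) | ((uint32)(d) << 24))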
+
+typedef enum {
+ SVGA_OVERLAY_FORMAT_INVALID = 0,
+ SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+ SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+ SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
+ /* FIFO escape layout:
+ * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
+ /* FIFO escape layout:
+ * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+ struct {
+ uint32 cmdType;
+ uint32 streamId;
+ } header;
+
+ // May include zero or more items.
+ struct {
+ uint32 registerId;
+ uint32 value;
+ } items[1];
+} SVGAEscapeVideoSetRegs;
+
+typedef
+struct SVGAEscapeVideoFlush {
+ uint32 cmdType;
+ uint32 streamId;
+} SVGAEscapeVideoFlush;
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+ uint32 command;
+ uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+ struct {
+ uint32 regId;
+ uint32 value;
+ } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+ struct {
+ uint32 regId;
+ uint32 value;
+ } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ * Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ * TRUE on success, FALSE on failure.
+ *
+ * Side effects:
+ * Pitches and offsets for the given YUV frame are stored in 'pitches'
+ * and 'offsets' respectively; both output arrays are optional.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
+ uint32 *width, // IN / OUT
+ uint32 *height, // IN / OUT
+ uint32 *size, // OUT
+ uint32 *pitches, // OUT (optional)
+ uint32 *offsets) // OUT (optional)
+{
+ int tmp;
+
+ *width = (*width + 1) & ~1;
+
+ if (offsets) {
+ offsets[0] = 0;
+ }
+
+ switch (format) {
+ case VMWARE_FOURCC_YV12:
+ *height = (*height + 1) & ~1;
+ *size = (*width + 3) & ~3;
+
+ if (pitches) {
+ pitches[0] = *size;
+ }
+
+ *size *= *height;
+
+ if (offsets) {
+ offsets[1] = *size;
+ }
+
+ tmp = ((*width >> 1) + 3) & ~3;
+
+ if (pitches) {
+ pitches[1] = pitches[2] = tmp;
+ }
+
+ tmp *= (*height >> 1);
+ *size += tmp;
+
+ if (offsets) {
+ offsets[2] = *size;
+ }
+
+ *size += tmp;
+ break;
+
+ case VMWARE_FOURCC_YUY2:
+ case VMWARE_FOURCC_UYVY:
+ *size = *width * 2;
+
+ if (pitches) {
+ pitches[0] = *size;
+ }
+
+ *size *= *height;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
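+
+/*
+ * Usage sketch (illustrative, with assumed dimensions): compute the
+ * size and plane layout of a 640x480 YV12 frame.
+ *
+ *   uint32 w = 640, h = 480, size;
+ *   uint32 pitches[3], offsets[3];
+ *
+ *   if (VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12, &w, &h,
+ *                                &size, pitches, offsets)) {
+ *      // size == 460800 (640*480*3/2); offsets[1] and offsets[2]
+ *      // locate the two half-resolution chroma planes.
+ *   }
+ */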
+
+#endif // _SVGA_OVERLAY_H_
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 00000000000..1b96c2ec07d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1346 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ * Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+
+/*
+ * PCI device IDs.
+ */
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
+
+/*
+ * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
+ * The changeMap in the monitor is proportional to this number. Therefore, we'd
+ * like to keep it as small as possible to reduce monitor overhead (using
+ * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
+ * 4k!).
+ *
+ * NB: For compatibility reasons, this value must be greater than 0xff0000.
+ * See bug 335072.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
+#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC 0x900000UL
+#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
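+
+/* For example, SVGA_MAKE_ID(2) evaluates to 0x90000002: the magic
+   number occupies the high 24 bits and the version the low 8. */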
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2 2
+#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+ PALETTE_BASE has moved */
+#define SVGA_VERSION_1 1
+#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0 0
+#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
+
+/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+#define SVGA_ID_INVALID 0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT 0x0
+#define SVGA_VALUE_PORT 0x1
+#define SVGA_BIOS_PORT 0x2
+#define SVGA_IRQSTATUS_PORT 0x8
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
+
+/*
+ * Registers
+ */
+
+enum {
+ SVGA_REG_ID = 0,
+ SVGA_REG_ENABLE = 1,
+ SVGA_REG_WIDTH = 2,
+ SVGA_REG_HEIGHT = 3,
+ SVGA_REG_MAX_WIDTH = 4,
+ SVGA_REG_MAX_HEIGHT = 5,
+ SVGA_REG_DEPTH = 6,
+ SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
+ SVGA_REG_PSEUDOCOLOR = 8,
+ SVGA_REG_RED_MASK = 9,
+ SVGA_REG_GREEN_MASK = 10,
+ SVGA_REG_BLUE_MASK = 11,
+ SVGA_REG_BYTES_PER_LINE = 12,
+ SVGA_REG_FB_START = 13, /* (Deprecated) */
+ SVGA_REG_FB_OFFSET = 14,
+ SVGA_REG_VRAM_SIZE = 15,
+ SVGA_REG_FB_SIZE = 16,
+
+ /* ID 0 implementation only had the above registers, then the palette */
+
+ SVGA_REG_CAPABILITIES = 17,
+ SVGA_REG_MEM_START = 18, /* (Deprecated) */
+ SVGA_REG_MEM_SIZE = 19,
+ SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
+ SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
+ SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
+ SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
+ SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
+ SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
+ SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
+ SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
+ SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+ SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
+ SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
+ SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
+ SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
+ SVGA_REG_IRQMASK = 33, /* Interrupt mask */
+
+ /* Legacy multi-monitor support */
+ SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+ SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
+ SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+ SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+ SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+ SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
+ SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
+
+ /* See "Guest memory regions" below. */
+ SVGA_REG_GMR_ID = 41,
+ SVGA_REG_GMR_DESCRIPTOR = 42,
+ SVGA_REG_GMR_MAX_IDS = 43,
+ SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+ SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
+ SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
+
+ SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
+ /* Next 768 (== 256*3) registers exist for colormap */
+
+ SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+ /* Base of scratch registers */
+ /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+ First 4 are reserved for VESA BIOS Extension; any remaining are for
+ the use of the current SVGA driver. */
+};
+
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ * SVGA_REG_GMR_ID --
+ *
+ * Read/write.
+ * This register holds the 32-bit ID (a small positive integer)
+ * of a GMR to create, delete, or redefine. Writing this register
+ * has no side-effects.
+ *
+ * SVGA_REG_GMR_DESCRIPTOR --
+ *
+ * Write-only.
+ * Writing this register will create, delete, or redefine the GMR
+ * specified by the above ID register. If this register is zero,
+ * the GMR is deleted. Any pointers into this GMR (including those
+ * currently being processed by FIFO commands) will be
+ * synchronously invalidated.
+ *
+ * If this register is nonzero, it must be the physical page
+ * number (PPN) of a data structure which describes the physical
+ * layout of the memory region this GMR should describe. The
+ * descriptor structure will be read synchronously by the SVGA
+ * device when this register is written. The descriptor need not
+ * remain allocated for the lifetime of the GMR.
+ *
+ * The guest driver should write SVGA_REG_GMR_ID first, then
+ * SVGA_REG_GMR_DESCRIPTOR.
+ *
+ * SVGA_REG_GMR_MAX_IDS --
+ *
+ * Read-only.
+ * The SVGA device may choose to support a maximum number of
+ * user-defined GMR IDs. This register holds the number of supported
+ * IDs. (The maximum supported ID plus 1)
+ *
+ * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ * Read-only.
+ * The SVGA device may choose to put a limit on the total number
+ * of SVGAGuestMemDescriptor structures it will read when defining
+ * a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ * - Terminate the GMR descriptor list.
+ * (ppn==0, numPages==0)
+ *
+ * - Add a PPN or range of PPNs to the GMR's virtual address space.
+ * (ppn != 0, numPages != 0)
+ *
+ * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ * support multi-page GMR descriptor tables without forcing the
+ * driver to allocate physically contiguous memory.
+ * (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page. The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well. In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
+
+typedef
+struct SVGAGuestMemDescriptor {
+ uint32 ppn;
+ uint32 numPages;
+} SVGAGuestMemDescriptor;
+
+typedef
+struct SVGAGuestPtr {
+ uint32 gmrId;
+ uint32 offset;
+} SVGAGuestPtr;
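+
+/*
+ * Illustrative (non-compiled) sketch: defining a small GMR as
+ * described above. The 'desc' page holds the descriptor list; the
+ * terminator entry is mandatory. struct vmw_private and the
+ * vmw_write() register accessor are assumed from elsewhere in this
+ * patch.
+ */
+#if 0
+static void example_define_gmr(struct vmw_private *dev_priv,
+ uint32 gmr_id, uint32 data_ppn, uint32 num_pages,
+ SVGAGuestMemDescriptor *desc, uint32 desc_ppn)
+{
+ /* One run of pages, then the mandatory terminator entry. */
+ desc[0].ppn = data_ppn;
+ desc[0].numPages = num_pages;
+ desc[1].ppn = 0;
+ desc[1].numPages = 0;
+
+ /* Write the ID first, then the descriptor PPN; the descriptor
+ * write triggers the synchronous read of the descriptor list. */
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_ppn);
+}
+#endif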
+
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ * This is a packed representation of the source 2D image format
+ * for a GMR-to-screen blit. Currently it is defined as an encoding
+ * of the screen's color depth and bits-per-pixel; however, 16 bits
+ * are reserved for future use to identify other encodings (such as
+ * RGBA or higher-precision images).
+ *
+ * Currently supported formats:
+ *
+ * bpp depth Format Name
+ * --- ----- -----------
+ * 32 24 32-bit BGRX
+ * 24 24 24-bit BGR
+ * 16 16 RGB 5-6-5
+ * 16 15 RGB 5-5-5
+ *
+ */
+
+typedef
+struct SVGAGMRImageFormat {
+ union {
+ struct {
+ uint32 bitsPerPixel : 8;
+ uint32 colorDepth : 8;
+ uint32 reserved : 16; // Must be zero
+ };
+
+ uint32 value;
+ };
+} SVGAGMRImageFormat;
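+
+/*
+ * Illustrative (non-compiled) sketch: packing the 32 bpp / 24-bit
+ * depth BGRX entry from the table above, e.g. for use with
+ * SVGA_CMD_DEFINE_GMRFB.
+ */
+#if 0
+static SVGAGMRImageFormat example_bgrx_format(void)
+{
+ SVGAGMRImageFormat fmt;
+
+ fmt.value = 0; /* Zeroes the reserved bits. */
+ fmt.bitsPerPixel = 32;
+ fmt.colorDepth = 24;
+ return fmt;
+}
+#endif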
+
+/*
+ * SVGAColorBGRX --
+ *
+ * A 24-bit color format (BGRX), which does not depend on the
+ * format of the legacy guest framebuffer (GFB) or the current
+ * GMRFB state.
+ */
+
+typedef
+struct SVGAColorBGRX {
+ union {
+ struct {
+ uint32 b : 8;
+ uint32 g : 8;
+ uint32 r : 8;
+ uint32 x : 8; // Unused
+ };
+
+ uint32 value;
+ };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ * Signed rectangle and point primitives. These are used by the new
+ * 2D primitives for drawing to Screen Objects, which can occupy a
+ * signed virtual coordinate space.
+ *
+ * SVGASignedRect specifies a half-open interval: the (left, top)
+ * pixel is part of the rectangle, but the (right, bottom) pixel is
+ * not.
+ */
+
+typedef
+struct SVGASignedRect {
+ int32 left;
+ int32 top;
+ int32 right;
+ int32 bottom;
+} SVGASignedRect;
+
+typedef
+struct SVGASignedPoint {
+ int32 x;
+ int32 y;
+} SVGASignedPoint;
+
+
+/*
+ * Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ */
+
+#define SVGA_CAP_NONE 0x00000000
+#define SVGA_CAP_RECT_COPY 0x00000002
+#define SVGA_CAP_CURSOR 0x00000020
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_8BIT_EMULATION 0x00000100
+#define SVGA_CAP_ALPHA_CURSOR 0x00000200
+#define SVGA_CAP_3D 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO 0x00008000
+#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
+#define SVGA_CAP_PITCHLOCK 0x00020000
+#define SVGA_CAP_IRQMASK 0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
+#define SVGA_CAP_GMR 0x00100000
+#define SVGA_CAP_TRACES 0x00200000
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem. It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ * following data area is and how much data it contains; there may be
+ * more registers following these, depending on the FIFO protocol
+ * version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+ /*
+ * Block 1 (basic registers): The originally defined FIFO registers.
+ * These exist and are valid for all versions of the FIFO protocol.
+ */
+
+ SVGA_FIFO_MIN = 0,
+ SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
+ SVGA_FIFO_NEXT_CMD,
+ SVGA_FIFO_STOP,
+
+ /*
+ * Block 2 (extended registers): Mandatory registers for the extended
+ * FIFO. These exist if the SVGA caps register includes
+ * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+ * associated capability bit is enabled.
+ *
+ * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+ * support only for the FIFO registers CAPABILITIES, FLAGS, and FENCE.
+ * This means that the guest has to test individually (in most cases
+ * using FIFO caps) for the presence of registers after this; the VMX
+ * can define "extended FIFO" to mean whatever it wants, and currently
+ * won't enable it unless there's room for that set and much more.
+ */
+
+ SVGA_FIFO_CAPABILITIES = 4,
+ SVGA_FIFO_FLAGS,
+ // Valid with SVGA_FIFO_CAP_FENCE:
+ SVGA_FIFO_FENCE,
+
+ /*
+ * Block 3a (optional extended registers): Additional registers for the
+ * extended FIFO, whose presence isn't actually implied by
+ * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+ * leave room for them.
+ *
+ * The VMX currently considers the registers in block 3a mandatory
+ * for the extended FIFO.
+ */
+
+ // Valid if exists (i.e. if extended FIFO enabled):
+ SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
+ // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+ SVGA_FIFO_PITCHLOCK,
+
+ // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+ SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
+ SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
+ SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
+ SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
+ SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+ // Valid with SVGA_FIFO_CAP_RESERVE:
+ SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
+
+ /*
+ * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+ *
+ * By default this is SVGA_ID_INVALID, to indicate that the cursor
+ * coordinates are specified relative to the virtual root. If this
+ * is set to a specific screen ID, cursor position is reinterpreted
+ * as a signed offset relative to that screen's origin. This is the
+ * only way to place the cursor on a non-rooted screen.
+ */
+ SVGA_FIFO_CURSOR_SCREEN_ID,
+
+ /*
+ * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+ * registers, but this must be done carefully and with judicious use of
+ * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+ * enough to tell you whether the register exists: we've shipped drivers
+ * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+ * the earlier ones. The actual order of introduction was:
+ * - PITCHLOCK
+ * - 3D_CAPS
+ * - CURSOR_* (cursor bypass 3)
+ * - RESERVED
+ * So, code that wants to know whether it can use any of the
+ * aforementioned registers, or anything else added after PITCHLOCK and
+ * before 3D_CAPS, needs to reason about something other than
+ * SVGA_FIFO_MIN.
+ */
+
+ /*
+ * 3D caps block space; valid with 3D hardware version >=
+ * SVGA3D_HWVERSION_WS6_B1.
+ */
+ SVGA_FIFO_3D_CAPS = 32,
+ SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+ /*
+ * End of VMX's current definition of "extended-FIFO registers".
+ * Registers before here are always enabled/disabled as a block; either
+ * the extended FIFO is enabled and includes all preceding registers, or
+ * it's disabled entirely.
+ *
+ * Block 3b (truly optional extended registers): Additional registers for
+ * the extended FIFO, which the VMX already knows how to enable and
+ * disable with correct granularity.
+ *
+ * Registers after here exist if and only if the guest SVGA driver
+ * sets SVGA_FIFO_MIN high enough to leave room for them.
+ */
+
+ // Valid if register exists:
+ SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+ SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+ SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
+
+ /*
+ * Always keep this last. This defines the maximum number of
+ * registers we know about. At power-on, this value is placed in
+ * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+ * to allocate this much space in FIFO memory for registers.
+ */
+ SVGA_FIFO_NUM_REGS
+};
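+
+/*
+ * Illustrative (non-compiled) sketch: a simplified FIFO partition at
+ * init time, reserving room for all registers this header knows about
+ * and leaving the rest as command data. This patch's vmw_fifo_init()
+ * performs a more careful version of the same setup; 'fifo_mem' is
+ * assumed to be the mapped FIFO memory.
+ */
+#if 0
+static void example_fifo_layout(uint32 __iomem *fifo_mem,
+ uint32 fifo_size_bytes)
+{
+ uint32 min = SVGA_FIFO_NUM_REGS * sizeof(uint32);
+
+ iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
+ iowrite32(fifo_size_bytes, fifo_mem + SVGA_FIFO_MAX);
+ iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
+}
+#endif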
+
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data. It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; it should be kept that way.
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
+
+
+/*
+ * FIFO Synchronization Registers
+ *
+ * This explains the relationship between the various FIFO
+ * sync-related registers in IOSpace and in FIFO space.
+ *
+ * SVGA_REG_SYNC --
+ *
+ * The SYNC register can be used in two different ways by the guest:
+ *
+ * 1. If the guest wishes to fully sync (drain) the FIFO,
+ * it will write once to SYNC then poll on the BUSY
+ * register. The FIFO is sync'ed once BUSY is zero.
+ *
+ * 2. If the guest wants to asynchronously wake up the host,
+ * it will write once to SYNC without polling on BUSY.
+ * Ideally it will do this after some new commands have
+ * been placed in the FIFO, and after reading a zero
+ * from SVGA_FIFO_BUSY.
+ *
+ * (1) is the original behaviour that SYNC was designed to
+ * support. Originally, a write to SYNC would implicitly
+ * trigger a read from BUSY. This causes us to synchronously
+ * process the FIFO.
+ *
+ * This behaviour has since been changed so that writing SYNC
+ * will *not* implicitly cause a read from BUSY. Instead, it
+ * makes a channel call which asynchronously wakes up the MKS
+ * thread.
+ *
+ * New guests can use this new behaviour to implement (2)
+ * efficiently. This lets guests get the host's attention
+ * without waiting for the MKS to poll, which gives us much
+ * better CPU utilization on SMP hosts and on UP hosts while
+ * we're blocked on the host GPU.
+ *
+ * Old guests shouldn't notice the behaviour change. SYNC was
+ * never guaranteed to process the entire FIFO, since it was
+ * bounded to a particular number of CPU cycles. Old guests will
+ * still loop on the BUSY register until the FIFO is empty.
+ *
+ * Writing to SYNC currently has the following side-effects:
+ *
+ * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ * - Asynchronously wakes up the MKS thread for FIFO processing
+ * - The value written to SYNC is recorded as a "reason", for
+ * stats purposes.
+ *
+ * If SVGA_FIFO_BUSY is available, drivers are advised to only
+ * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ * eventually set SVGA_FIFO_BUSY on its own, but this approach
+ * lets the driver avoid sending multiple asynchronous wakeup
+ * messages to the MKS thread.
+ *
+ * SVGA_REG_BUSY --
+ *
+ * This register is set to TRUE when SVGA_REG_SYNC is written,
+ * and it reads as FALSE when the FIFO has been completely
+ * drained.
+ *
+ * Every read from this register causes us to synchronously
+ * process FIFO commands. There is no guarantee as to how many
+ * commands each read will process.
+ *
+ * CPU time spent processing FIFO commands will be billed to
+ * the guest.
+ *
+ * New drivers should avoid using this register unless they
+ * need to guarantee that the FIFO is completely drained. It
+ * is overkill for performing a sync-to-fence. Older drivers
+ * will use this register for any type of synchronization.
+ *
+ * SVGA_FIFO_BUSY --
+ *
+ * This register is a fast way for the guest driver to check
+ * whether the FIFO is already being processed. It reads and
+ * writes at normal RAM speeds, with no monitor intervention.
+ *
+ * If this register reads as TRUE, the host is guaranteeing that
+ * any new commands written into the FIFO will be noticed before
+ * the MKS goes back to sleep.
+ *
+ * If this register reads as FALSE, no such guarantee can be
+ * made.
+ *
+ * The guest should use this register to quickly determine
+ * whether or not it needs to wake up the host. If the guest
+ * just wrote a command or group of commands that it would like
+ * the host to begin processing, it should:
+ *
+ * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ * action is necessary.
+ *
+ * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ * code that we've already sent a SYNC to the host and we
+ * don't need to send a duplicate.
+ *
+ * 3. Write a reason to SVGA_REG_SYNC. This will send an
+ * asynchronous wakeup to the MKS thread.
+ */
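+
+/*
+ * Illustrative (non-compiled) sketch: the three-step wakeup protocol
+ * described above. 'fifo_mem' is assumed to be the mapped FIFO
+ * (dev_priv->mmio_virt in this patch), and SVGA_SYNC_GENERIC is the
+ * "reason" value used elsewhere in this patch.
+ */
+#if 0
+static void example_fifo_wakeup(struct vmw_private *dev_priv,
+ uint32 __iomem *fifo_mem)
+{
+ /* 1. If the host is already processing, nothing to do. */
+ if (ioread32(fifo_mem + SVGA_FIFO_BUSY))
+ return;
+
+ /* 2. Record that a wakeup has been sent... */
+ iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+
+ /* 3. ...then send the asynchronous wakeup itself. */
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+}
+#endif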
+
+
+/*
+ * FIFO Capabilities
+ *
+ * Fence -- Fence register and command are supported
+ * Accel Front -- Front buffer only commands are supported
+ * Pitch Lock -- Pitch lock register is supported
+ * Video -- SVGA Video overlay units are supported
+ * Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ * of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ * Provides dynamic multi-screen rendering, for improved Unity and
+ * multi-monitor modes. With Screen Object, the guest can
+ * dynamically create and destroy 'screens', which can represent
+ * Unity windows or virtual monitors. Screen Object also provides
+ * strong guarantees that DMA operations happen only when
+ * guest-initiated. Screen Object deprecates the BAR1 guest
+ * framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ * New registers:
+ * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ * New 2D commands:
+ * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ * New 3D commands:
+ * BLIT_SURFACE_TO_SCREEN
+ *
+ * New guarantees:
+ *
+ * - The host will not read or write guest memory, including the GFB,
+ * except when explicitly initiated by a DMA command.
+ *
+ * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ * is guaranteed to complete before any subsequent FENCEs.
+ *
+ * - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ * PRESENT_READBACK) as well as new Screen blit commands will
+ * all behave consistently as blits, and memory will be read
+ * or written in FIFO order.
+ *
+ * For example, if you PRESENT from one SVGA3D surface to multiple
+ * places on the screen, the data copied will always be from the
+ * SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ * This was not necessarily true on devices without Screen Object.
+ *
+ * This means that on devices that support Screen Object, the
+ * PRESENT_READBACK command should not be necessary unless you
+ * actually want to read back the results of 3D rendering into
+ * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ * command provides a strict superset of functionality.)
+ *
+ * - When a screen is resized, either using Screen Object commands or
+ * legacy multimon registers, its contents are preserved.
+ */
+
+#define SVGA_FIFO_CAP_NONE 0
+#define SVGA_FIFO_CAP_FENCE (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
+#define SVGA_FIFO_CAP_VIDEO (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE (1<<5)
+#define SVGA_FIFO_CAP_RESERVE (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
+
+
+/*
+ * FIFO Flags
+ *
+ * Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE 0
+#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+ SVGA_VIDEO_ENABLED = 0,
+ SVGA_VIDEO_FLAGS,
+ SVGA_VIDEO_DATA_OFFSET,
+ SVGA_VIDEO_FORMAT,
+ SVGA_VIDEO_COLORKEY,
+ SVGA_VIDEO_SIZE, // Deprecated
+ SVGA_VIDEO_WIDTH,
+ SVGA_VIDEO_HEIGHT,
+ SVGA_VIDEO_SRC_X,
+ SVGA_VIDEO_SRC_Y,
+ SVGA_VIDEO_SRC_WIDTH,
+ SVGA_VIDEO_SRC_HEIGHT,
+ SVGA_VIDEO_DST_X, // Signed int32
+ SVGA_VIDEO_DST_Y, // Signed int32
+ SVGA_VIDEO_DST_WIDTH,
+ SVGA_VIDEO_DST_HEIGHT,
+ SVGA_VIDEO_PITCH_1,
+ SVGA_VIDEO_PITCH_2,
+ SVGA_VIDEO_PITCH_3,
+ SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
+ SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+ SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ * width and height relate to the entire source video frame.
+ * srcX, srcY, srcWidth and srcHeight represent the subset of the source
+ * video frame to be displayed.
+ */
+
+typedef struct SVGAOverlayUnit {
+ uint32 enabled;
+ uint32 flags;
+ uint32 dataOffset;
+ uint32 format;
+ uint32 colorKey;
+ uint32 size;
+ uint32 width;
+ uint32 height;
+ uint32 srcX;
+ uint32 srcY;
+ uint32 srcWidth;
+ uint32 srcHeight;
+ int32 dstX;
+ int32 dstY;
+ uint32 dstWidth;
+ uint32 dstHeight;
+ uint32 pitches[3];
+ uint32 dataGMRId;
+ uint32 dstScreenId;
+} SVGAOverlayUnit;
+
+
+/*
+ * SVGAScreenObject --
+ *
+ * This is a new way to represent a guest's multi-monitor screen or
+ * Unity window. Screen objects are only supported if the
+ * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ * If Screen Objects are supported, they can be used to fully
+ * replace the functionality provided by the framebuffer registers
+ * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ * The screen object is a struct with guaranteed binary
+ * compatibility. New flags can be added, and the struct may grow,
+ * but existing fields must retain their meaning.
+ *
+ */
+
+#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
+
+typedef
+struct SVGAScreenObject {
+ uint32 structSize; // sizeof(SVGAScreenObject)
+ uint32 id;
+ uint32 flags;
+ struct {
+ uint32 width;
+ uint32 height;
+ } size;
+ struct {
+ int32 x;
+ int32 y;
+ } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
+} SVGAScreenObject;
+
+
+/*
+ * Commands in the command FIFO:
+ *
+ * Command IDs defined below are used for the traditional 2D FIFO
+ * communication (not all commands are available for all versions of the
+ * SVGA FIFO protocol).
+ *
+ * Note the holes in the command ID numbers: These commands have been
+ * deprecated, and the old IDs must not be reused.
+ *
+ * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ * protocol.
+ *
+ * Each command's parameters are described by the comments and
+ * structs below.
+ */
+
+typedef enum {
+ SVGA_CMD_INVALID_CMD = 0,
+ SVGA_CMD_UPDATE = 1,
+ SVGA_CMD_RECT_COPY = 3,
+ SVGA_CMD_DEFINE_CURSOR = 19,
+ SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
+ SVGA_CMD_UPDATE_VERBOSE = 25,
+ SVGA_CMD_FRONT_ROP_FILL = 29,
+ SVGA_CMD_FENCE = 30,
+ SVGA_CMD_ESCAPE = 33,
+ SVGA_CMD_DEFINE_SCREEN = 34,
+ SVGA_CMD_DESTROY_SCREEN = 35,
+ SVGA_CMD_DEFINE_GMRFB = 36,
+ SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
+ SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
+ SVGA_CMD_ANNOTATION_FILL = 39,
+ SVGA_CMD_ANNOTATION_COPY = 40,
+ SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_ARGS 64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ * This is a DMA transfer which copies from the Guest Framebuffer
+ * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ * intersect with the provided virtual rectangle.
+ *
+ * This command does not support using arbitrary guest memory as a
+ * data source; it only works with the pre-defined GFB memory.
+ * This command also does not support signed virtual coordinates.
+ * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ * negative root x/y coordinates, the negative portion of those
+ * screens will not be reachable by this command.
+ *
+ * This command is not necessary when using framebuffer
+ * traces. Traces are automatically enabled if the SVGA FIFO is
+ * disabled, and you may explicitly enable/disable traces using
+ * SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ * automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ * Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ * pseudocolor screen updates. The newer Screen Object commands
+ * only support true color formats.
+ *
+ * Availability:
+ * Always available.
+ */
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+} SVGAFifoCmdUpdate;
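+
+/*
+ * Illustrative (non-compiled) sketch: emitting an UPDATE for a dirty
+ * rectangle, assuming this patch's vmw_fifo_reserve() and
+ * vmw_fifo_commit() FIFO helpers.
+ */
+#if 0
+static int example_send_update(struct vmw_private *dev_priv,
+ uint32 x, uint32 y, uint32 width, uint32 height)
+{
+ struct {
+ uint32 cmd;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->cmd = SVGA_CMD_UPDATE;
+ cmd->body.x = x;
+ cmd->body.y = y;
+ cmd->body.width = width;
+ cmd->body.height = height;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ return 0;
+}
+#endif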
+
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ * Perform a rectangular DMA transfer from one area of the GFB to
+ * another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ * SVGA_CAP_RECT_COPY
+ */
+
+typedef
+struct {
+ uint32 srcX;
+ uint32 srcY;
+ uint32 destX;
+ uint32 destY;
+ uint32 width;
+ uint32 height;
+} SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ * Provide a new cursor image, as an AND/XOR mask.
+ *
+ * The recommended way to position the cursor overlay is by using
+ * the SVGA_FIFO_CURSOR_* registers, supported by the
+ * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ * SVGA_CAP_CURSOR
+ */
+
+typedef
+struct {
+ uint32 id; // Reserved, must be zero.
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ /*
+ * Followed by scanline data for AND mask, then XOR mask.
+ * Each scanline is padded to a 32-bit boundary.
+ */
+} SVGAFifoCmdDefineCursor;
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ * Provide a new cursor image, in 32-bit BGRA format.
+ *
+ * The recommended way to position the cursor overlay is by using
+ * the SVGA_FIFO_CURSOR_* registers, supported by the
+ * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ * SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+struct {
+ uint32 id; // Reserved, must be zero.
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ /* Followed by scanline data */
+} SVGAFifoCmdDefineAlphaCursor;
+
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ * 'reason' value, an opaque cookie which is used by internal
+ * debugging tools. Third party drivers should not use this
+ * command.
+ *
+ * Availability:
+ * SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+ uint32 reason;
+} SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ * This is a hint which tells the SVGA device that the driver has
+ * just filled a rectangular region of the GFB with a solid
+ * color. Instead of reading these pixels from the GFB, the device
+ * can assume that they all equal 'color'. This is primarily used
+ * for remote desktop protocols.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define SVGA_ROP_COPY 0x03
+
+typedef
+struct {
+ uint32 color; // In the same format as the GFB
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+ uint32 rop; // Must be SVGA_ROP_COPY
+} SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ * Insert a synchronization fence. When the SVGA device reaches
+ * this command, it will copy the 'fence' value into the
+ * SVGA_FIFO_FENCE register. It will also compare the fence against
+ * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ * raise this interrupt.
+ *
+ * Availability:
+ * SVGA_FIFO_FENCE for this command,
+ * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+struct {
+ uint32 fence;
+} SVGAFifoCmdFence;
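+
+/*
+ * Illustrative (non-compiled) sketch: inserting a fence and
+ * synchronously waiting for it, assuming this patch's
+ * vmw_fifo_send_fence() and vmw_wait_fence() helpers.
+ */
+#if 0
+static int example_sync_to_fence(struct vmw_private *dev_priv)
+{
+ uint32_t sequence;
+ int ret;
+
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
+ if (ret != 0)
+ return ret;
+
+ return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+}
+#endif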
+
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ * Send an extended or vendor-specific variable length command.
+ * This is used for video overlay, third party plugins, and
+ * internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+struct {
+ uint32 nsid;
+ uint32 size;
+ /* followed by 'size' bytes of data */
+} SVGAFifoCmdEscape;
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ * Define or redefine an SVGAScreenObject. See the description of
+ * SVGAScreenObject above. The video driver is responsible for
+ * generating new screen IDs. They should be small positive
+ * integers. The virtual device will have an implementation
+ * specific upper limit on the number of screen IDs
+ * supported. Drivers are responsible for recycling IDs. The first
+ * valid ID is zero.
+ *
+ * - Interaction with other registers:
+ *
+ * For backwards compatibility, when the GFB mode registers (WIDTH,
+ * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ * deletes all screens other than screen #0, and redefines screen
+ * #0 according to the specified mode. Drivers that use
+ * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ * If you use screen objects, do not use the legacy multi-mon
+ * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAScreenObject screen; // Variable-length according to version
+} SVGAFifoCmdDefineScreen;
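+
+/*
+ * Illustrative (non-compiled) sketch: defining a rooted 640x480
+ * primary screen at the virtual origin, assuming this patch's
+ * vmw_fifo_reserve() and vmw_fifo_commit() helpers.
+ */
+#if 0
+static int example_define_screen(struct vmw_private *dev_priv)
+{
+ struct {
+ uint32 cmd;
+ SVGAFifoCmdDefineScreen body;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->cmd = SVGA_CMD_DEFINE_SCREEN;
+ cmd->body.screen.structSize = sizeof(SVGAScreenObject);
+ cmd->body.screen.id = 0;
+ cmd->body.screen.flags = SVGA_SCREEN_HAS_ROOT | SVGA_SCREEN_IS_PRIMARY;
+ cmd->body.screen.size.width = 640;
+ cmd->body.screen.size.height = 480;
+ cmd->body.screen.root.x = 0;
+ cmd->body.screen.root.y = 0;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ return 0;
+}
+#endif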
+
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ * Destroy an SVGAScreenObject. Its ID is immediately available for
+ * re-use.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ uint32 screenId;
+} SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ * This command sets a piece of SVGA device state called the
+ * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ * piece of light-weight state which identifies the location and
+ * format of an image in guest memory or in BAR1. The GMRFB has
+ * an arbitrary size, and it doesn't need to match the geometry
+ * of the GFB or any screen object.
+ *
+ * The GMRFB can be redefined as often as you like. You could
+ * always use the same GMRFB, you could redefine it before
+ * rendering from a different guest screen, or you could even
+ * redefine it before every blit.
+ *
+ * There are multiple ways to use this command. The simplest way is
+ * to use it to move the framebuffer either to elsewhere in the GFB
+ * (BAR1) memory region, or to a user-defined GMR. This lets a
+ * driver use a framebuffer allocated entirely out of normal system
+ * memory, which we encourage.
+ *
+ * Another way to use this command is to set up a ring buffer of
+ * updates in GFB memory. If a driver wants to ensure that no
+ * frames are skipped by the SVGA device, it is important that the
+ * driver not modify the source data for a blit until the device is
+ * done processing the command. One efficient way to accomplish
+ * this is to use a ring of small DMA buffers. Each buffer is used
+ * for one blit, then we move on to the next buffer in the
+ * ring. The FENCE mechanism is used to protect each buffer from
+ * re-use until the device is finished with that buffer's
+ * corresponding blit.
+ *
+ * This command does not affect the meaning of SVGA_CMD_UPDATE.
+ * UPDATEs always occur from the legacy GFB memory area. This
+ * command has no support for pseudocolor GMRFBs. Currently only
+ * true-color 15, 16, and 24-bit depths are supported. Future
+ * devices may expose capabilities for additional framebuffer
+ * formats.
+ *
+ * The default GMRFB value is undefined. Drivers must always send
+ * this command at least once before performing any blit from the
+ * GMRFB.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAGuestPtr ptr;
+ uint32 bytesPerLine;
+ SVGAGMRImageFormat format;
+} SVGAFifoCmdDefineGMRFB;
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ * This is a guest-to-host blit. It performs a DMA operation to
+ * copy a rectangular region of pixels from the current GMRFB to
+ * one or more Screen Objects.
+ *
+ * The destination coordinate may be specified relative to a
+ * screen's origin (if a screen ID is specified) or relative to the
+ * virtual coordinate system's origin (if the screen ID is
+ * SVGA_ID_INVALID). The actual destination may span zero or more
+ * screens, in the case of a virtual destination rect or a rect
+ * which extends off the edge of the specified screen.
+ *
+ * This command writes to the screen's "base layer": the underlying
+ * framebuffer which exists below any cursor or video overlays. No
+ * action is necessary to explicitly hide or update any overlays
+ * which exist on top of the updated region.
+ *
+ * The SVGA device is guaranteed to finish reading from the GMRFB
+ * by the time any subsequent FENCE commands are reached.
+ *
+ * This command consumes an annotation. See the
+ * SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint srcOrigin;
+ SVGASignedRect destRect;
+ uint32 destScreenId;
+} SVGAFifoCmdBlitGMRFBToScreen;
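+
+/*
+ * Illustrative (non-compiled) sketch: a minimal DEFINE_GMRFB plus
+ * BLIT_GMRFB_TO_SCREEN sequence in virtual coordinates, again
+ * assuming this patch's vmw_fifo_reserve() and vmw_fifo_commit()
+ * helpers.
+ */
+#if 0
+static int example_blit_to_screen(struct vmw_private *dev_priv,
+ SVGAGuestPtr src, uint32 bytes_per_line,
+ SVGAGMRImageFormat format, const SVGASignedRect *dest)
+{
+ struct {
+ uint32 defineCmd;
+ SVGAFifoCmdDefineGMRFB define;
+ uint32 blitCmd;
+ SVGAFifoCmdBlitGMRFBToScreen blit;
+ } *cmd;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->defineCmd = SVGA_CMD_DEFINE_GMRFB;
+ cmd->define.ptr = src;
+ cmd->define.bytesPerLine = bytes_per_line;
+ cmd->define.format = format;
+
+ cmd->blitCmd = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ cmd->blit.srcOrigin.x = dest->left;
+ cmd->blit.srcOrigin.y = dest->top;
+ cmd->blit.destRect = *dest;
+ cmd->blit.destScreenId = SVGA_ID_INVALID; /* virtual coordinates */
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ return 0;
+}
+#endif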
+
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ * This is a host-to-guest blit. It performs a DMA operation to
+ * copy a rectangular region of pixels from a single Screen Object
+ * back to the current GMRFB.
+ *
+ * Usage note: This command should be used rarely. It will
+ * typically be inefficient, but it is necessary for some types of
+ * synchronization between 3D (GPU) and 2D (CPU) rendering into
+ * overlapping areas of a screen.
+ *
+ * The source coordinate is specified relative to a screen's
+ * origin. The provided screen ID must be valid. If any parameters
+ * are invalid, the resulting pixel values are undefined.
+ *
+ * This command reads the screen's "base layer". Overlays like
+ * video and cursor are not included, but any data which was sent
+ * using a blit-to-screen primitive will be available, no matter
+ * whether the data's original source was the GMRFB or the 3D
+ * acceleration hardware.
+ *
+ * Note that our guest-to-host blits and host-to-guest blits aren't
+ * symmetric in their current implementation. While the parameters
+ * are identical, host-to-guest blits are a lot less featureful.
+ * They do not support clipping: If the source parameters don't
+ * fully fit within a screen, the blit fails. They must originate
+ * from exactly one screen. Virtual coordinates are not directly
+ * supported.
+ *
+ * Host-to-guest blits do support the same set of GMRFB formats
+ * offered by guest-to-host blits.
+ *
+ * The SVGA device is guaranteed to finish writing to the GMRFB by
+ * the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint destOrigin;
+ SVGASignedRect srcRect;
+ uint32 srcScreenId;
+} SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ * This is a blit annotation. This command stores a small piece of
+ * device state which is consumed by the next blit-to-screen
+ * command. The state is only cleared by commands which are
+ * specifically documented as consuming an annotation. Other
+ * commands (such as ESCAPEs for debugging) may intervene between
+ * the annotation and its associated blit.
+ *
+ * This annotation is a promise about the contents of the next
+ * blit: The video driver is guaranteeing that all pixels in that
+ * blit will have the same value, specified here as a color in
+ * SVGAColorBGRX format.
+ *
+ * The SVGA device can still render the blit correctly even if it
+ * ignores this annotation, but the annotation may allow it to
+ * perform the blit more efficiently, for example by ignoring the
+ * source data and performing a fill in hardware.
+ *
+ * This annotation is most important for performance when the
+ * user's display is being remoted over a network connection.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAColorBGRX color;
+} SVGAFifoCmdAnnotationFill;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
+ * information about annotations.
+ *
+ * This annotation is a promise about the contents of the next
+ * blit: The video driver is guaranteeing that all pixels in that
+ * blit will have the same value as those which already exist at an
+ * identically-sized region on the same or a different screen.
+ *
+ * Note that the source pixels for the COPY in this annotation are
+ * sampled before applying the annotation's associated blit. They
+ * are allowed to overlap with the blit's destination pixels.
+ *
+ * The copy source rectangle is specified the same way as the blit
+ * destination: it can be a rectangle which spans zero or more
+ * screens, specified relative to either a screen or to the virtual
+ * coordinate system's origin. If the source rectangle includes
+ * pixels which are not from exactly one screen, the results are
+ * undefined.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint srcOrigin;
+ uint32 srcScreenId;
+} SVGAFifoCmdAnnotationCopy;
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 00000000000..55836dedcfc
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Silly typedefs for the svga headers. Currently the headers are shared
+ * between all components that talk to svga. And as such the headers are
+ * are in a completely different style and use weird defines.
+ *
+ * This file lets all the ugly be prefixed with svga*.
+ */
+
+#ifndef _SVGA_TYPES_H_
+#define _SVGA_TYPES_H_
+
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+typedef uint8_t uint8;
+typedef int32_t int32;
+typedef bool Bool;
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 00000000000..825ebe3d89d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,252 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+
+static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_CACHED;
+
+static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
+static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED;
+
+struct ttm_placement vmw_vram_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vram_placement_flags
+};
+
+struct ttm_placement vmw_vram_sys_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_vram_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vram_ne_placement_flags
+};
+
+struct ttm_placement vmw_sys_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct vmw_ttm_backend {
+ struct ttm_backend backend;
+};
+
+static int vmw_ttm_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
+{
+ return 0;
+}
+
+static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+ return 0;
+}
+
+static int vmw_ttm_unbind(struct ttm_backend *backend)
+{
+ return 0;
+}
+
+static void vmw_ttm_clear(struct ttm_backend *backend)
+{
+}
+
+static void vmw_ttm_destroy(struct ttm_backend *backend)
+{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ kfree(vmw_be);
+}
+
+static struct ttm_backend_func vmw_ttm_func = {
+ .populate = vmw_ttm_populate,
+ .clear = vmw_ttm_clear,
+ .bind = vmw_ttm_bind,
+ .unbind = vmw_ttm_unbind,
+ .destroy = vmw_ttm_destroy,
+};
+
+struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+{
+ struct vmw_ttm_backend *vmw_be;
+
+ vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+ if (!vmw_be)
+ return NULL;
+
+ vmw_be->backend.func = &vmw_ttm_func;
+
+ return &vmw_be->backend;
+}
+
+int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ return 0;
+}
+
+int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ /* "On-card" video ram */
+ man->gpu_offset = 0;
+ man->io_offset = dev_priv->vram_start;
+ man->io_size = dev_priv->vram_size;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->io_addr = NULL;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void vmw_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = vmw_sys_placement;
+}
+
+/**
+ * FIXME: Proper access checks on buffers.
+ */
+
+static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ if (new_mem->mem_type != TTM_PL_SYSTEM)
+ vmw_dmabuf_gmr_unbind(bo);
+}
+
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+ vmw_dmabuf_gmr_unbind(bo);
+}
+
+/**
+ * FIXME: We're using the old vmware polling method to sync.
+ * Do this with fences instead.
+ */
+
+static void *vmw_sync_obj_ref(void *sync_obj)
+{
+ return sync_obj;
+}
+
+static void vmw_sync_obj_unref(void **sync_obj)
+{
+ *sync_obj = NULL;
+}
+
+static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ mutex_unlock(&dev_priv->hw_mutex);
+ return 0;
+}
+
+static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+
+ return vmw_fence_signaled(dev_priv, sequence);
+}
+
+static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
+ bool lazy, bool interruptible)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+
+ return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+}
+
+struct ttm_bo_driver vmw_bo_driver = {
+ .create_ttm_backend_entry = vmw_ttm_backend_init,
+ .invalidate_caches = vmw_invalidate_caches,
+ .init_mem_type = vmw_init_mem_type,
+ .evict_flags = vmw_evict_flags,
+ .move = NULL,
+ .verify_access = vmw_verify_access,
+ .sync_obj_signaled = vmw_sync_obj_signaled,
+ .sync_obj_wait = vmw_sync_obj_wait,
+ .sync_obj_flush = vmw_sync_obj_flush,
+ .sync_obj_unref = vmw_sync_obj_unref,
+ .sync_obj_ref = vmw_sync_obj_ref,
+ .move_notify = vmw_move_notify,
+ .swap_notify = vmw_swap_notify
+};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 00000000000..0c9c0811f42
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,783 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+#define VMWGFX_CHIP_SVGAII 0
+#define VMW_FB_RESERVATION 0
+
+/**
+ * Fully encoded drm commands. Might move to vmw_drm.h
+ */
+
+#define DRM_IOCTL_VMW_GET_PARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
+ struct drm_vmw_getparam_arg)
+#define DRM_IOCTL_VMW_ALLOC_DMABUF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
+ union drm_vmw_alloc_dmabuf_arg)
+#define DRM_IOCTL_VMW_UNREF_DMABUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
+ struct drm_vmw_unref_dmabuf_arg)
+#define DRM_IOCTL_VMW_CURSOR_BYPASS \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
+ struct drm_vmw_cursor_bypass_arg)
+
+#define DRM_IOCTL_VMW_CONTROL_STREAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
+ struct drm_vmw_control_stream_arg)
+#define DRM_IOCTL_VMW_CLAIM_STREAM \
+ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
+ struct drm_vmw_stream_arg)
+#define DRM_IOCTL_VMW_UNREF_STREAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
+ struct drm_vmw_stream_arg)
+
+#define DRM_IOCTL_VMW_CREATE_CONTEXT \
+ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
+ struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_UNREF_CONTEXT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
+ struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_CREATE_SURFACE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
+ union drm_vmw_surface_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SURFACE \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
+ struct drm_vmw_surface_arg)
+#define DRM_IOCTL_VMW_REF_SURFACE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
+ union drm_vmw_surface_reference_arg)
+#define DRM_IOCTL_VMW_EXECBUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
+ struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_FIFO_DEBUG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
+ struct drm_vmw_fifo_debug_arg)
+#define DRM_IOCTL_VMW_FENCE_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
+ struct drm_vmw_fence_wait_arg)
+
+
+/**
+ * The core DRM version of this macro doesn't account for
+ * DRM_COMMAND_BASE.
+ */
+
+#define VMW_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+/**
+ * Ioctl definitions.
+ */
+
+static struct drm_ioctl_desc vmw_ioctls[] = {
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+ vmw_kms_cursor_bypass_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ DRM_AUTH | DRM_UNLOCKED)
+};
+
+static struct pci_device_id vmw_pci_id_list[] = {
+ {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+ {0, 0, 0}
+};
+
+static char *vmw_devname = "vmwgfx";
+
+static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+static void vmw_master_init(struct vmw_master *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ void *ptr);
+
+static void vmw_print_capabilities(uint32_t capabilities)
+{
+ DRM_INFO("Capabilities:\n");
+ if (capabilities & SVGA_CAP_RECT_COPY)
+ DRM_INFO(" Rect copy.\n");
+ if (capabilities & SVGA_CAP_CURSOR)
+ DRM_INFO(" Cursor.\n");
+ if (capabilities & SVGA_CAP_CURSOR_BYPASS)
+ DRM_INFO(" Cursor bypass.\n");
+ if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
+ DRM_INFO(" Cursor bypass 2.\n");
+ if (capabilities & SVGA_CAP_8BIT_EMULATION)
+ DRM_INFO(" 8bit emulation.\n");
+ if (capabilities & SVGA_CAP_ALPHA_CURSOR)
+ DRM_INFO(" Alpha cursor.\n");
+ if (capabilities & SVGA_CAP_3D)
+ DRM_INFO(" 3D.\n");
+ if (capabilities & SVGA_CAP_EXTENDED_FIFO)
+ DRM_INFO(" Extended Fifo.\n");
+ if (capabilities & SVGA_CAP_MULTIMON)
+ DRM_INFO(" Multimon.\n");
+ if (capabilities & SVGA_CAP_PITCHLOCK)
+ DRM_INFO(" Pitchlock.\n");
+ if (capabilities & SVGA_CAP_IRQMASK)
+ DRM_INFO(" Irq mask.\n");
+ if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
+ DRM_INFO(" Display Topology.\n");
+ if (capabilities & SVGA_CAP_GMR)
+ DRM_INFO(" GMR.\n");
+ if (capabilities & SVGA_CAP_TRACES)
+ DRM_INFO(" Traces.\n");
+}
+
+static int vmw_request_device(struct vmw_private *dev_priv)
+{
+ int ret;
+
+ vmw_kms_save_vga(dev_priv);
+
+ ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize FIFO.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vmw_release_device(struct vmw_private *dev_priv)
+{
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_kms_restore_vga(dev_priv);
+}
+
+
+static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct vmw_private *dev_priv;
+ int ret;
+ uint32_t svga_id;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (unlikely(dev_priv == NULL)) {
+ DRM_ERROR("Failed allocating a device private struct.\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->dev = dev;
+ dev_priv->vmw_chipset = chipset;
+ dev_priv->last_read_sequence = (uint32_t) -100;
+ mutex_init(&dev_priv->hw_mutex);
+ mutex_init(&dev_priv->cmdbuf_mutex);
+ rwlock_init(&dev_priv->resource_lock);
+ idr_init(&dev_priv->context_idr);
+ idr_init(&dev_priv->surface_idr);
+ idr_init(&dev_priv->stream_idr);
+ ida_init(&dev_priv->gmr_ida);
+ mutex_init(&dev_priv->init_mutex);
+ init_waitqueue_head(&dev_priv->fence_queue);
+ init_waitqueue_head(&dev_priv->fifo_queue);
+ atomic_set(&dev_priv->fence_queue_waiters, 0);
+ atomic_set(&dev_priv->fifo_queue_waiters, 0);
+ INIT_LIST_HEAD(&dev_priv->gmr_lru);
+
+ dev_priv->io_start = pci_resource_start(dev->pdev, 0);
+ dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
+ dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+ svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+ if (svga_id != SVGA_ID_2) {
+ ret = -ENOSYS;
+ DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
+ mutex_unlock(&dev_priv->hw_mutex);
+ goto out_err0;
+ }
+
+ dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+ if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ dev_priv->max_gmr_descriptors =
+ vmw_read(dev_priv,
+ SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+ dev_priv->max_gmr_ids =
+ vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
+ }
+
+ dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+ dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ vmw_print_capabilities(dev_priv->capabilities);
+
+ if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ DRM_INFO("Max GMR ids is %u\n",
+ (unsigned)dev_priv->max_gmr_ids);
+ DRM_INFO("Max GMR descriptors is %u\n",
+ (unsigned)dev_priv->max_gmr_descriptors);
+ }
+ DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
+ dev_priv->vram_start, dev_priv->vram_size / 1024);
+ DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
+ dev_priv->mmio_start, dev_priv->mmio_size / 1024);
+
+ ret = vmw_ttm_global_init(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+
+ vmw_master_init(&dev_priv->fbdev_master);
+ ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+ dev_priv->active_master = &dev_priv->fbdev_master;
+
+
+ ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
+ &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+ false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+ goto out_err1;
+ }
+
+ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+ (dev_priv->vram_size >> PAGE_SHIFT));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+ goto out_err2;
+ }
+
+ dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+
+ dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
+ dev_priv->mmio_size);
+
+ if (unlikely(dev_priv->mmio_virt == NULL)) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed mapping MMIO.\n");
+ goto out_err3;
+ }
+
+ dev_priv->tdev = ttm_object_device_init
+ (dev_priv->mem_global_ref.object, 12);
+
+ if (unlikely(dev_priv->tdev == NULL)) {
+ DRM_ERROR("Unable to initialize TTM object management.\n");
+ ret = -ENOMEM;
+ goto out_err4;
+ }
+
+ dev->dev_private = dev_priv;
+
+ if (!dev->devname)
+ dev->devname = vmw_devname;
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
+
+ ret = pci_request_regions(dev->pdev, "vmwgfx probe");
+ dev_priv->stealth = (ret != 0);
+ if (dev_priv->stealth) {
+ /**
+ * Request at least the mmio PCI resource.
+ */
+
+ DRM_INFO("It appears like vesafb is loaded. "
+ "Ignore above error if any.\n");
+ ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
+ goto out_no_device;
+ }
+ }
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_device;
+ vmw_kms_init(dev_priv);
+ vmw_overlay_init(dev_priv);
+ vmw_fb_init(dev_priv);
+
+ dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+ register_pm_notifier(&dev_priv->pm_nb);
+
+ DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
+
+ return 0;
+
+out_no_device:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev->devname == vmw_devname)
+ dev->devname = NULL;
+out_no_irq:
+ ttm_object_device_release(&dev_priv->tdev);
+out_err4:
+ iounmap(dev_priv->mmio_virt);
+out_err3:
+ drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_err2:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+out_err1:
+ vmw_ttm_global_release(dev_priv);
+out_err0:
+ ida_destroy(&dev_priv->gmr_ida);
+ idr_destroy(&dev_priv->surface_idr);
+ idr_destroy(&dev_priv->context_idr);
+ idr_destroy(&dev_priv->stream_idr);
+ kfree(dev_priv);
+ return ret;
+}
+
+static int vmw_driver_unload(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
+
+ unregister_pm_notifier(&dev_priv->pm_nb);
+
+ vmw_fb_close(dev_priv);
+ vmw_kms_close(dev_priv);
+ vmw_overlay_close(dev_priv);
+ vmw_release_device(dev_priv);
+ if (dev_priv->stealth)
+ pci_release_region(dev->pdev, 2);
+ else
+ pci_release_regions(dev->pdev);
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev->devname == vmw_devname)
+ dev->devname = NULL;
+ ttm_object_device_release(&dev_priv->tdev);
+ iounmap(dev_priv->mmio_virt);
+ drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+ vmw_ttm_global_release(dev_priv);
+ ida_destroy(&dev_priv->gmr_ida);
+ idr_destroy(&dev_priv->surface_idr);
+ idr_destroy(&dev_priv->context_idr);
+ idr_destroy(&dev_priv->stream_idr);
+
+ kfree(dev_priv);
+
+ return 0;
+}
+
+static void vmw_postclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vmw_fpriv *vmw_fp;
+
+ vmw_fp = vmw_fpriv(file_priv);
+ ttm_object_file_release(&vmw_fp->tfile);
+ if (vmw_fp->locked_master)
+ drm_master_put(&vmw_fp->locked_master);
+ kfree(vmw_fp);
+}
+
+static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp;
+ int ret = -ENOMEM;
+
+ vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
+ if (unlikely(vmw_fp == NULL))
+ return ret;
+
+ vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ if (unlikely(vmw_fp->tfile == NULL))
+ goto out_no_tfile;
+
+ file_priv->driver_priv = vmw_fp;
+
+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
+ dev_priv->bdev.dev_mapping =
+ file_priv->filp->f_path.dentry->d_inode->i_mapping;
+
+ return 0;
+
+out_no_tfile:
+ kfree(vmw_fp);
+ return ret;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+
+ /*
+ * Do extra checking on driver private ioctls. The ioctl number encodes
+ * the expected argument size and direction; a mismatch with the table
+ * entry indicates user space built against incompatible headers.
+ */
+
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ struct drm_ioctl_desc *ioctl =
+ &vmw_ioctls[nr - DRM_COMMAND_BASE];
+
+ if (unlikely(ioctl->cmd != cmd)) {
+ DRM_ERROR("Invalid command format, ioctl %d\n",
+ nr - DRM_COMMAND_BASE);
+ return -EINVAL;
+ }
+ }
+
+ return drm_ioctl(filp, cmd, arg);
+}
+
+static int vmw_firstopen(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ dev_priv->is_opened = true;
+
+ return 0;
+}
+
+static void vmw_lastclose(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_crtc *crtc;
+ struct drm_mode_set set;
+ int ret;
+
+ /**
+ * Do nothing on the lastclose call from drm_unload.
+ */
+
+ if (!dev_priv->is_opened)
+ return;
+
+ dev_priv->is_opened = false;
+ set.x = 0;
+ set.y = 0;
+ set.fb = NULL;
+ set.mode = NULL;
+ set.connectors = NULL;
+ set.num_connectors = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ set.crtc = crtc;
+ ret = crtc->funcs->set_config(&set);
+ WARN_ON(ret != 0);
+ }
+
+}
+
+static void vmw_master_init(struct vmw_master *vmaster)
+{
+ ttm_lock_init(&vmaster->lock);
+}
+
+static int vmw_master_create(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct vmw_master *vmaster;
+
+ DRM_INFO("Master create.\n");
+ vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
+ if (unlikely(vmaster == NULL))
+ return -ENOMEM;
+
+ ttm_lock_init(&vmaster->lock);
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ master->driver_priv = vmaster;
+
+ return 0;
+}
+
+static void vmw_master_destroy(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct vmw_master *vmaster = vmw_master(master);
+
+ DRM_INFO("Master destroy.\n");
+ master->driver_priv = NULL;
+ kfree(vmaster);
+}
+
+
+static int vmw_master_set(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_open)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *active = dev_priv->active_master;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret = 0;
+
+ DRM_INFO("Master set.\n");
+
+ if (active) {
+ BUG_ON(active != &dev_priv->fbdev_master);
+ ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
+ if (unlikely(ret != 0))
+ goto out_no_active_lock;
+
+ ttm_lock_set_kill(&active->lock, true, SIGTERM);
+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to clean VRAM on "
+ "master drop.\n");
+ }
+
+ dev_priv->active_master = NULL;
+ }
+
+ ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+ if (!from_open) {
+ ttm_vt_unlock(&vmaster->lock);
+ BUG_ON(vmw_fp->locked_master != file_priv->master);
+ drm_master_put(&vmw_fp->locked_master);
+ }
+
+ dev_priv->active_master = vmaster;
+
+ return 0;
+
+out_no_active_lock:
+ vmw_release_device(dev_priv);
+ return ret;
+}
+
+static void vmw_master_drop(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_release)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ DRM_INFO("Master drop.\n");
+
+ /**
+ * Make sure the master doesn't disappear while we have
+ * it locked.
+ */
+
+ vmw_fp->locked_master = drm_master_get(file_priv->master);
+ ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to lock TTM at VT switch.\n");
+ drm_master_put(&vmw_fp->locked_master);
+ }
+
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+
+ dev_priv->active_master = &dev_priv->fbdev_master;
+ ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+ ttm_vt_unlock(&dev_priv->fbdev_master.lock);
+
+ vmw_fb_on(dev_priv);
+}
+
+
+static void vmw_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ void *ptr)
+{
+ struct vmw_private *dev_priv =
+ container_of(nb, struct vmw_private, pm_nb);
+ struct vmw_master *vmaster = dev_priv->active_master;
+
+ switch (val) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ttm_suspend_lock(&vmaster->lock);
+
+ /**
+ * This empties VRAM and unbinds all GMR bindings.
+ * Buffer contents are moved to swappable memory.
+ */
+ ttm_bo_swapout_all(&dev_priv->bdev);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ttm_suspend_unlock(&vmaster->lock);
+ break;
+ case PM_RESTORE_PREPARE:
+ break;
+ case PM_POST_RESTORE:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * These might not be needed with the virtual SVGA device.
+ */
+
+int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+int vmw_pci_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ return pci_enable_device(pdev);
+}
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_MODESET,
+ .load = vmw_driver_load,
+ .unload = vmw_driver_unload,
+ .firstopen = vmw_firstopen,
+ .lastclose = vmw_lastclose,
+ .irq_preinstall = vmw_irq_preinstall,
+ .irq_postinstall = vmw_irq_postinstall,
+ .irq_uninstall = vmw_irq_uninstall,
+ .irq_handler = vmw_irq_handler,
+ .reclaim_buffers_locked = NULL,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = vmw_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
+ .dma_quiescent = NULL, /*vmw_dma_quiescent, */
+ .master_create = vmw_master_create,
+ .master_destroy = vmw_master_destroy,
+ .master_set = vmw_master_set,
+ .master_drop = vmw_master_drop,
+ .open = vmw_driver_open,
+ .postclose = vmw_postclose,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .suspend = vmw_pci_suspend,
+ .resume = vmw_pci_resume
+ },
+ .name = VMWGFX_DRIVER_NAME,
+ .desc = VMWGFX_DRIVER_DESC,
+ .date = VMWGFX_DRIVER_DATE,
+ .major = VMWGFX_DRIVER_MAJOR,
+ .minor = VMWGFX_DRIVER_MINOR,
+ .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
+};
+
+static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+static int __init vmwgfx_init(void)
+{
+ int ret;
+ ret = drm_init(&driver);
+ if (ret)
+ DRM_ERROR("Failed initializing DRM.\n");
+ return ret;
+}
+
+static void __exit vmwgfx_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(vmwgfx_init);
+module_exit(vmwgfx_exit);
+
+MODULE_AUTHOR("VMware Inc. and others");
+MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 00000000000..356dc935ec1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,521 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_DRV_H_
+#define _VMWGFX_DRV_H_
+
+#include "vmwgfx_reg.h"
+#include "drmP.h"
+#include "vmwgfx_drm.h"
+#include "drm_hashtab.h"
+#include "linux/suspend.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_module.h"
+
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
+#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+#define VMWGFX_MAX_RELOCATIONS 2048
+#define VMWGFX_MAX_GMRS 2048
+
+struct vmw_fpriv {
+ struct drm_master *locked_master;
+ struct ttm_object_file *tfile;
+};
+
+struct vmw_dma_buffer {
+ struct ttm_buffer_object base;
+ struct list_head validate_list;
+ struct list_head gmr_lru;
+ uint32_t gmr_id;
+ bool gmr_bound;
+ uint32_t cur_validate_node;
+ bool on_validate_list;
+};
+
+struct vmw_resource {
+ struct kref kref;
+ struct vmw_private *dev_priv;
+ struct idr *idr;
+ int id;
+ enum ttm_object_type res_type;
+ bool avail;
+ void (*hw_destroy) (struct vmw_resource *res);
+ void (*res_free) (struct vmw_resource *res);
+
+ /* TODO is a generic snooper needed? */
+#if 0
+ void (*snoop)(struct vmw_resource *res,
+ struct ttm_object_file *tfile,
+ SVGA3dCmdHeader *header);
+ void *snoop_priv;
+#endif
+};
+
+struct vmw_cursor_snooper {
+ struct drm_crtc *crtc;
+ size_t age;
+ uint32_t *image;
+};
+
+struct vmw_surface {
+ struct vmw_resource res;
+ uint32_t flags;
+ uint32_t format;
+ uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size *sizes;
+ uint32_t num_sizes;
+
+ bool scanout;
+
+ /* TODO so far just an extra pointer */
+ struct vmw_cursor_snooper snooper;
+};
+
+struct vmw_fifo_state {
+ unsigned long reserved_size;
+ __le32 *dynamic_buffer;
+ __le32 *static_buffer;
+ __le32 *last_buffer;
+ uint32_t last_data_size;
+ uint32_t last_buffer_size;
+ bool last_buffer_add;
+ unsigned long static_buffer_size;
+ bool using_bounce_buffer;
+ uint32_t capabilities;
+ struct mutex fifo_mutex;
+ struct rw_semaphore rwsem;
+};
+
+struct vmw_relocation {
+ SVGAGuestPtr *location;
+ uint32_t index;
+};
+
+struct vmw_sw_context {
+ struct ida bo_list;
+ uint32_t last_cid;
+ bool cid_valid;
+ uint32_t last_sid;
+ uint32_t sid_translation;
+ bool sid_valid;
+ struct ttm_object_file *tfile;
+ struct list_head validate_nodes;
+ struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
+ uint32_t cur_reloc;
+ struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+ uint32_t cur_val_buf;
+};
+
+struct vmw_legacy_display;
+struct vmw_overlay;
+
+struct vmw_master {
+ struct ttm_lock lock;
+};
+
+struct vmw_private {
+ struct ttm_bo_device bdev;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+
+ struct vmw_fifo_state fifo;
+
+ struct drm_device *dev;
+ unsigned long vmw_chipset;
+ unsigned int io_start;
+ uint32_t vram_start;
+ uint32_t vram_size;
+ uint32_t mmio_start;
+ uint32_t mmio_size;
+ uint32_t fb_max_width;
+ uint32_t fb_max_height;
+ __le32 __iomem *mmio_virt;
+ int mmio_mtrr;
+ uint32_t capabilities;
+ uint32_t max_gmr_descriptors;
+ uint32_t max_gmr_ids;
+ struct mutex hw_mutex;
+
+ /*
+ * VGA registers.
+ */
+
+ uint32_t vga_width;
+ uint32_t vga_height;
+ uint32_t vga_depth;
+ uint32_t vga_bpp;
+ uint32_t vga_pseudo;
+ uint32_t vga_red_mask;
+ uint32_t vga_blue_mask;
+ uint32_t vga_green_mask;
+
+ /*
+ * Framebuffer info.
+ */
+
+ void *fb_info;
+ struct vmw_legacy_display *ldu_priv;
+ struct vmw_overlay *overlay_priv;
+
+ /*
+ * Context and surface management.
+ */
+
+ rwlock_t resource_lock;
+ struct idr context_idr;
+ struct idr surface_idr;
+ struct idr stream_idr;
+
+ /*
+ * Block lastclose from racing with firstopen.
+ */
+
+ struct mutex init_mutex;
+
+ /*
+ * A resource manager for kernel-only surfaces and
+ * contexts.
+ */
+
+ struct ttm_object_device *tdev;
+
+ /*
+ * Fencing and IRQs.
+ */
+
+ atomic_t fence_seq;
+ wait_queue_head_t fence_queue;
+ wait_queue_head_t fifo_queue;
+ atomic_t fence_queue_waiters;
+ atomic_t fifo_queue_waiters;
+ uint32_t last_read_sequence;
+ spinlock_t irq_lock;
+
+ /*
+ * Device state
+ */
+
+ uint32_t traces_state;
+ uint32_t enable_state;
+ uint32_t config_done_state;
+
+ /**
+ * Execbuf
+ */
+ /**
+ * Protected by the cmdbuf mutex.
+ */
+
+ struct vmw_sw_context ctx;
+ uint32_t val_seq;
+ struct mutex cmdbuf_mutex;
+
+ /**
+ * GMR management. Protected by the lru spinlock.
+ */
+
+ struct ida gmr_ida;
+ struct list_head gmr_lru;
+
+
+ /**
+ * Operating mode.
+ */
+
+ bool stealth;
+ bool is_opened;
+
+ /**
+ * Master management.
+ */
+
+ struct vmw_master *active_master;
+ struct vmw_master fbdev_master;
+ struct notifier_block pm_nb;
+};
+
+static inline struct vmw_private *vmw_priv(struct drm_device *dev)
+{
+ return (struct vmw_private *)dev->dev_private;
+}
+
+static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
+{
+ return (struct vmw_fpriv *)file_priv->driver_priv;
+}
+
+static inline struct vmw_master *vmw_master(struct drm_master *master)
+{
+ return (struct vmw_master *) master->driver_priv;
+}
+
+static inline void vmw_write(struct vmw_private *dev_priv,
+ unsigned int offset, uint32_t value)
+{
+ outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+ outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+}
+
+static inline uint32_t vmw_read(struct vmw_private *dev_priv,
+ unsigned int offset)
+{
+ uint32_t val;
+
+ outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+ val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ return val;
+}
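+
+/*
+ * Note that the paired index/value port accesses above are not atomic;
+ * callers are expected to serialize SVGA register access, presumably via
+ * hw_mutex in struct vmw_private.
+ */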
+
+/**
+ * GMR utilities - vmwgfx_gmr.c
+ */
+
+extern int vmw_gmr_bind(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo);
+extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
+
+/**
+ * Resource utilities - vmwgfx_resource.c
+ */
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+extern void vmw_resource_unreference(struct vmw_resource **p_res);
+extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id);
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res));
+extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id);
+extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ uint32_t cur_validate_node);
+extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ uint32_t id, struct vmw_dma_buffer **out);
+extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
+extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
+extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
+extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t *inout_id,
+ struct vmw_resource **out);
+
+
+/**
+ * Misc Ioctl functionality - vmwgfx_ioctl.c
+ */
+
+extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * Fifo utilities - vmwgfx_fifo.c
+ */
+
+extern int vmw_fifo_init(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo);
+extern void vmw_fifo_release(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo);
+extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
+ uint32_t *sequence);
+extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
+extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+
+/**
+ * TTM glue - vmwgfx_ttm_glue.c
+ */
+
+extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
+extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
+extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/**
+ * TTM buffer object driver - vmwgfx_buffer.c
+ */
+
+extern struct ttm_placement vmw_vram_placement;
+extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_bo_driver vmw_bo_driver;
+extern int vmw_dma_quiescent(struct drm_device *dev);
+
+/**
+ * Command submission - vmwgfx_execbuf.c
+ */
+
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * IRQs and waiting - vmwgfx_irq.c
+ */
+
+extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
+ uint32_t sequence, bool interruptible,
+ unsigned long timeout);
+extern void vmw_irq_preinstall(struct drm_device *dev);
+extern int vmw_irq_postinstall(struct drm_device *dev);
+extern void vmw_irq_uninstall(struct drm_device *dev);
+extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence);
+extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fallback_wait(struct vmw_private *dev_priv,
+ bool lazy,
+ bool fifo_idle,
+ uint32_t sequence,
+ bool interruptible,
+ unsigned long timeout);
+
+/**
+ * Kernel framebuffer - vmwgfx_fb.c
+ */
+
+int vmw_fb_init(struct vmw_private *vmw_priv);
+int vmw_fb_close(struct vmw_private *dev_priv);
+int vmw_fb_off(struct vmw_private *vmw_priv);
+int vmw_fb_on(struct vmw_private *vmw_priv);
+
+/**
+ * Kernel modesetting - vmwgfx_kms.c
+ */
+
+int vmw_kms_init(struct vmw_private *dev_priv);
+int vmw_kms_close(struct vmw_private *dev_priv);
+int vmw_kms_save_vga(struct vmw_private *vmw_priv);
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header);
+
+/**
+ * Overlay control - vmwgfx_overlay.c
+ */
+
+int vmw_overlay_init(struct vmw_private *dev_priv);
+int vmw_overlay_close(struct vmw_private *dev_priv);
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vmw_overlay_stop_all(struct vmw_private *dev_priv);
+int vmw_overlay_resume_all(struct vmw_private *dev_priv);
+int vmw_overlay_pause_all(struct vmw_private *dev_priv);
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
+
+/**
+ * Inline helper functions
+ */
+
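+/*
+ * The unreference helpers below clear the caller's pointer before dropping
+ * the reference, so use of a stale pointer after the object may have been
+ * freed is easier to catch.
+ */
+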
+static inline void vmw_surface_unreference(struct vmw_surface **srf)
+{
+ struct vmw_surface *tmp_srf = *srf;
+ struct vmw_resource *res = &tmp_srf->res;
+ *srf = NULL;
+
+ vmw_resource_unreference(&res);
+}
+
+static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
+{
+ (void) vmw_resource_reference(&srf->res);
+ return srf;
+}
+
+static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+{
+ struct vmw_dma_buffer *tmp_buf = *buf;
+ struct ttm_buffer_object *bo = &tmp_buf->base;
+ *buf = NULL;
+
+ ttm_bo_unref(&bo);
+}
+
+static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+{
+ if (ttm_bo_reference(&buf->base))
+ return buf;
+ return NULL;
+}
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 00000000000..0897359b3e4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,716 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_reg.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_placement.h"
+
+static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+}
+
+static int vmw_cmd_ok(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ return 0;
+}
+
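+/*
+ * The cid/sid checks below cache the last id that validated successfully
+ * in the software context, so command streams that repeatedly reference
+ * the same context or surface pay for only one lookup.
+ */
+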
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
+ return 0;
+
+ ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use context %u\n",
+ (unsigned) cmd->cid);
+ return ret;
+ }
+
+ sw_context->last_cid = cmd->cid;
+ sw_context->cid_valid = true;
+
+ return 0;
+}
+
+static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ uint32_t *sid)
+{
+ if (*sid == SVGA3D_INVALID_ID)
+ return 0;
+
+ if (unlikely((!sw_context->sid_valid ||
+ *sid != sw_context->last_sid))) {
+ int real_id;
+ int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+ *sid, &real_id);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could ot find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
+ return ret;
+ }
+
+ sw_context->last_sid = *sid;
+ sw_context->sid_valid = true;
+ *sid = real_id;
+ sw_context->sid_translation = real_id;
+ } else
+ *sid = sw_context->sid_translation;
+
+ return 0;
+}
+
+
+static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ return ret;
+}
+
+static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceCopy body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ if (unlikely(ret != 0))
+ return ret;
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+}
+
+static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceStretchBlt body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ if (unlikely(ret != 0))
+ return ret;
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+}
+
+static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+}
+
+static int vmw_cmd_present_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+}
+
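+/*
+ * Look up the DMA buffer behind a guest pointer, record a relocation for
+ * it, and add the buffer to the validation list. Repeated references to
+ * the same buffer reuse its validate node, so each buffer is validated
+ * only once per submission.
+ */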
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAGuestPtr *ptr,
+ struct vmw_dma_buffer **vmw_bo_p)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ uint32_t handle = ptr->gmrId;
+ struct vmw_relocation *reloc;
+ uint32_t cur_validate_node;
+ struct ttm_validate_buffer *val_buf;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use GMR region.\n");
+ return -EINVAL;
+ }
+ bo = &vmw_bo->base;
+
+ if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+ DRM_ERROR("Max number relocations per submission"
+ " exceeded\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc = &sw_context->relocs[sw_context->cur_reloc++];
+ reloc->location = ptr;
+
+ cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+ if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
+ DRM_ERROR("Max number of DMA buffers per submission"
+ " exceeded.\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc->index = cur_validate_node;
+ if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
+ val_buf = &sw_context->val_bufs[cur_validate_node];
+ val_buf->bo = ttm_bo_reference(bo);
+ val_buf->new_sync_obj_arg = (void *) dev_priv;
+ list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ ++sw_context->cur_val_buf;
+ }
+ *vmw_bo_p = vmw_bo;
+ return 0;
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ *vmw_bo_p = NULL;
+ return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->q.guestResult,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->q.guestResult,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ struct vmw_surface *srf = NULL;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->dma.guest.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bo = &vmw_bo->base;
+ ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+ cmd->dma.host.sid, &srf);
+ if (ret) {
+ DRM_ERROR("could not find surface\n");
+ goto out_no_reloc;
+ }
+
+ /**
+ * Patch command stream with device SID.
+ */
+
+ cmd->dma.host.sid = srf->res.id;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+ vmw_surface_unreference(&srf);
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ return ret;
+}
+
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDrawPrimitives body;
+ } *cmd;
+ SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+ (unsigned long)header + sizeof(*cmd));
+ SVGA3dPrimitiveRange *range;
+ uint32_t i;
+ uint32_t maxnum;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_draw_cmd, header);
+ maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+ if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+ DRM_ERROR("Illegal number of vertex declarations.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &decl->array.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ maxnum = (header->size - sizeof(cmd->body) -
+ cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+ if (unlikely(cmd->body.numRanges > maxnum)) {
+ DRM_ERROR("Illegal number of index ranges.\n");
+ return -EINVAL;
+ }
+
+ range = (SVGA3dPrimitiveRange *) decl;
+ for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &range->indexArray.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_tex_state_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetTextureState state;
+ };
+
+ SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+ ((unsigned long) header + header->size + sizeof(header));
+ SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+ ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ for (; cur_state < last_state; ++cur_state) {
+ if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+ continue;
+
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &cur_state->value);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
+
+typedef int (*vmw_cmd_func) (struct vmw_private *,
+ struct vmw_sw_context *,
+ SVGA3dCmdHeader *);
+
+#define VMW_CMD_DEF(cmd, func) \
+ [cmd - SVGA_3D_CMD_BASE] = func
+
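+/*
+ * Per-command verifier dispatch table: C99 designated initializers index
+ * each handler by its offset from SVGA_3D_CMD_BASE, letting vmw_cmd_check()
+ * jump straight from a command header id to the matching check function.
+ */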
+static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+ &vmw_cmd_set_render_target_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+ VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
+ &vmw_cmd_blt_surf_screen_check)
+};
+
+static int vmw_cmd_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t *size)
+{
+ uint32_t cmd_id;
+ uint32_t size_remaining = *size;
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+ int ret;
+
+ cmd_id = ((uint32_t *)buf)[0];
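+ /*
+ * SVGA_CMD_UPDATE is a fixed-size 2D fifo command: the command id plus
+ * x, y, width and height, i.e. five 32-bit words. It references no 3D
+ * resources and can be passed through without further checking.
+ */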
+ if (cmd_id == SVGA_CMD_UPDATE) {
+ *size = 5 << 2;
+ return 0;
+ }
+
+ cmd_id = le32_to_cpu(header->id);
+ *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+
+ cmd_id -= SVGA_3D_CMD_BASE;
+ if (unlikely(*size > size_remaining))
+ goto out_err;
+
+ if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
+ goto out_err;
+
+ ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ return 0;
+out_err:
+ DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+}
+
+static int vmw_cmd_check_all(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t size)
+{
+ int32_t cur_size = size;
+ int ret;
+
+ while (cur_size > 0) {
+ size = cur_size;
+ ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
+ if (unlikely(ret != 0))
+ return ret;
+ buf = (void *)((unsigned long) buf + size);
+ cur_size -= size;
+ }
+
+ if (unlikely(cur_size != 0)) {
+ DRM_ERROR("Command verifier out of sync.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vmw_free_relocations(struct vmw_sw_context *sw_context)
+{
+ sw_context->cur_reloc = 0;
+}
+
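+/*
+ * Once validation has given every buffer its final placement, each
+ * recorded relocation patches the guest pointer in the command stream
+ * with the buffer's VRAM offset and GMR id.
+ */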
+static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
+{
+ uint32_t i;
+ struct vmw_relocation *reloc;
+ struct ttm_validate_buffer *validate;
+ struct ttm_buffer_object *bo;
+
+ for (i = 0; i < sw_context->cur_reloc; ++i) {
+ reloc = &sw_context->relocs[i];
+ validate = &sw_context->val_bufs[reloc->index];
+ bo = validate->bo;
+ reloc->location->offset += bo->offset;
+ reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+ }
+ vmw_free_relocations(sw_context);
+}
+
+static void vmw_clear_validations(struct vmw_sw_context *sw_context)
+{
+ struct ttm_validate_buffer *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
+ head) {
+ list_del(&entry->head);
+ vmw_dmabuf_validate_clear(entry->bo);
+ ttm_bo_unref(&entry->bo);
+ sw_context->cur_val_buf--;
+ }
+ BUG_ON(sw_context->cur_val_buf != 0);
+}
+
+static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+ return 0;
+
+ /**
+ * Put BO in VRAM, only if there is space.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ if (unlikely(ret == -ERESTARTSYS))
+ return ret;
+
+ /**
+ * Otherwise, set it up as GMR.
+ */
+
+ if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+ return 0;
+
+ ret = vmw_gmr_bind(dev_priv, bo);
+ if (likely(ret == 0 || ret == -ERESTARTSYS))
+ return ret;
+
+ /**
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ return ret;
+}
+
+
+static int vmw_validate_buffers(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+ list_for_each_entry(entry, &sw_context->validate_nodes, head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
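+/*
+ * Execbuf flow: reserve fifo space, copy the user command stream into it,
+ * verify and patch the stream, reserve and validate all referenced
+ * buffers, apply relocations, commit the fifo and fence the buffers.
+ * Failures unwind in reverse order.
+ */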
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_fence_rep fence_rep;
+ struct drm_vmw_fence_rep __user *user_fence_rep;
+ int ret;
+ void *user_cmd;
+ void *cmd;
+ uint32_t sequence;
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_cmd_mutex;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ user_cmd = (void __user *)(unsigned long)arg->commands;
+ ret = copy_from_user(cmd, user_cmd, arg->command_size);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed copying commands.\n");
+ goto out_commit;
+ }
+
+ sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->cid_valid = false;
+ sw_context->sid_valid = false;
+ sw_context->cur_reloc = 0;
+ sw_context->cur_val_buf = 0;
+
+ INIT_LIST_HEAD(&sw_context->validate_nodes);
+
+ ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
+ dev_priv->val_seq++);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ ret = vmw_validate_buffers(dev_priv, sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ vmw_apply_relocations(sw_context);
+ vmw_fifo_commit(dev_priv, arg->command_size);
+
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
+
+ ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+ (void *)(unsigned long) sequence);
+ vmw_clear_validations(sw_context);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * This error is harmless, because if fence submission fails,
+ * vmw_fifo_send_fence will sync.
+ */
+
+ if (ret != 0)
+ DRM_ERROR("Fence submission error. Syncing.\n");
+
+ fence_rep.error = ret;
+ fence_rep.fence_seq = (uint64_t) sequence;
+
+ user_fence_rep = (struct drm_vmw_fence_rep __user *)
+ (unsigned long)arg->fence_rep;
+
+ /*
+ * copy_to_user errors will be detected by user space not
+ * seeing fence_rep::error filled in.
+ */
+
+ ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+
+ vmw_kms_cursor_post_execbuf(dev_priv);
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_err:
+ vmw_free_relocations(sw_context);
+ ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_clear_validations(sw_context);
+out_commit:
+ vmw_fifo_commit(dev_priv, 0);
+out_unlock:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+out_no_cmd_mutex:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 00000000000..a93367041cd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,737 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 David Airlie
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#include "ttm/ttm_placement.h"
+
+#define VMW_DIRTY_DELAY (HZ / 30)
+
+struct vmw_fb_par {
+ struct vmw_private *vmw_priv;
+
+ void *vmalloc;
+
+ struct vmw_dma_buffer *vmw_bo;
+ struct ttm_bo_kmap_obj map;
+
+ u32 pseudo_palette[17];
+
+ unsigned depth;
+ unsigned bpp;
+
+ unsigned max_width;
+ unsigned max_height;
+
+ void *bo_ptr;
+ unsigned bo_size;
+ bool bo_iowrite;
+
+ struct {
+ spinlock_t lock;
+ bool active;
+ unsigned x1;
+ unsigned y1;
+ unsigned x2;
+ unsigned y2;
+ } dirty;
+};
+
+static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ u32 *pal = par->pseudo_palette;
+
+ if (regno > 15) {
+ DRM_ERROR("Bad regno %u.\n", regno);
+ return 1;
+ }
+
+ switch (par->depth) {
+ case 24:
+ case 32:
+ pal[regno] = ((red & 0xff00) << 8) |
+ (green & 0xff00) |
+ ((blue & 0xff00) >> 8);
+ break;
+ default:
+ DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ int depth = var->bits_per_pixel;
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+
+ switch (var->bits_per_pixel) {
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (depth) {
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ DRM_ERROR("Bad depth %u.\n", depth);
+ return -EINVAL;
+ }
+
+ /* without multimon it's hard to resize */
+ if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
+ (var->xres != par->max_width ||
+ var->yres != par->max_height)) {
+ DRM_ERROR("Tried to resize, but we don't have multimon\n");
+ return -EINVAL;
+ }
+
+ if (var->xres > par->max_width ||
+ var->yres > par->max_height) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+
+ /* TODO check if pitch and offset changes */
+
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ } else {
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
+
+ /* TODO check if pitch and offset changes */
+ }
+
+ return 0;
+}
+
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static int vmw_fb_blank(int blank, struct fb_info *info)
+{
+ return 0;
+}
+
+/*
+ * Dirty code
+ */
+
+static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+{
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ struct fb_info *info = vmw_priv->fb_info;
+ int stride = (info->fix.line_length / 4);
+ int *src = (int *)info->screen_base;
+ __le32 __iomem *vram_mem = par->bo_ptr;
+ unsigned long flags;
+ unsigned x, y, w, h;
+ int i, k;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ if (!par->dirty.active) {
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+ return;
+ }
+ x = par->dirty.x1;
+ y = par->dirty.y1;
+ w = min(par->dirty.x2, info->var.xres) - x;
+ h = min(par->dirty.y2, info->var.yres) - y;
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
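+ /*
+ * Copy the dirty x-range of each scanline from the shadow buffer into
+ * VRAM. Note that the loop runs from the dirty top edge to the end of
+ * the mapped buffer rather than stopping at y + h.
+ */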
+ for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
+ for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
+ iowrite32(src[k], vram_mem + k);
+ }
+
+#if 0
+ DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
+#endif
+
+ cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return;
+ }
+
+ cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
+ cmd->body.x = cpu_to_le32(x);
+ cmd->body.y = cpu_to_le32(y);
+ cmd->body.width = cpu_to_le32(w);
+ cmd->body.height = cpu_to_le32(h);
+ vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+}
+
+static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
+ unsigned x1, unsigned y1,
+ unsigned width, unsigned height)
+{
+ struct fb_info *info = par->vmw_priv->fb_info;
+ unsigned long flags;
+ unsigned x2 = x1 + width;
+ unsigned y2 = y1 + height;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ if (par->dirty.x1 == par->dirty.x2) {
+ par->dirty.x1 = x1;
+ par->dirty.y1 = y1;
+ par->dirty.x2 = x2;
+ par->dirty.y2 = y2;
+ /* If we are active, start the dirty work;
+ * we share the work with the defio system. */
+ if (par->dirty.active)
+ schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+ } else {
+ if (x1 < par->dirty.x1)
+ par->dirty.x1 = x1;
+ if (y1 < par->dirty.y1)
+ par->dirty.y1 = y1;
+ if (x2 > par->dirty.x2)
+ par->dirty.x2 = x2;
+ if (y2 > par->dirty.y2)
+ par->dirty.y2 = y2;
+ }
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+}
+
+static void vmw_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct vmw_fb_par *par = info->par;
+ unsigned long start, end, min, max;
+ unsigned long flags;
+ struct page *page;
+ int y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.x1 = 0;
+ par->dirty.y1 = y1;
+ par->dirty.x2 = info->var.xres;
+ par->dirty.y2 = y2;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+ }
+
+ vmw_fb_dirty_flush(par);
+};
+
+struct fb_deferred_io vmw_defio = {
+ .delay = VMW_DIRTY_DELAY,
+ .deferred_io = vmw_deferred_io,
+};
+
+/*
+ * Draw code
+ */
+
+static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+ vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
+ rect->width, rect->height);
+}
+
+static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ cfb_copyarea(info, region);
+ vmw_fb_dirty_mark(info->par, region->dx, region->dy,
+ region->width, region->height);
+}
+
+static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+ vmw_fb_dirty_mark(info->par, image->dx, image->dy,
+ image->width, image->height);
+}
+
+/*
+ * Bring up code
+ */
+
+static struct fb_ops vmw_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vmw_fb_check_var,
+ .fb_set_par = vmw_fb_set_par,
+ .fb_setcolreg = vmw_fb_setcolreg,
+ .fb_fillrect = vmw_fb_fillrect,
+ .fb_copyarea = vmw_fb_copyarea,
+ .fb_imageblit = vmw_fb_imageblit,
+ .fb_pan_display = vmw_fb_pan_display,
+ .fb_blank = vmw_fb_blank,
+};
+
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+ size_t size, struct vmw_dma_buffer **out)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret;
+
+ ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* interruptible? */
+ ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+ if (!vmw_bo)
+ goto err_unlock;
+
+ ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+ &ne_placement,
+ false,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto err_unlock; /* init frees the buffer on failure */
+
+ *out = vmw_bo;
+
+ ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+
+ return 0;
+
+err_unlock:
+ ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ return ret;
+}
+
+int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+ struct device *device = &vmw_priv->dev->pdev->dev;
+ struct vmw_fb_par *par;
+ struct fb_info *info;
+ unsigned initial_width, initial_height;
+ unsigned fb_width, fb_height;
+ unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ int ret;
+
+ initial_width = 800;
+ initial_height = 600;
+
+ fb_bpp = 32;
+ fb_depth = 24;
+
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+ } else {
+ fb_width = min(vmw_priv->fb_max_width, initial_width);
+ fb_height = min(vmw_priv->fb_max_height, initial_height);
+ }
+
+ initial_width = min(fb_width, initial_width);
+ initial_height = min(fb_height, initial_height);
+
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+
+ fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+ fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
+ fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
+
+ DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
+ DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
+ DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
+ DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
+ DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
+ DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
+ DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
+ DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
+ DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
+ DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
+ DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
+ DRM_DEBUG("fb_pitch %u\n", fb_pitch);
+ DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
+
+ info = framebuffer_alloc(sizeof(*par), device);
+ if (!info)
+ return -ENOMEM;
+
+ /*
+ * Par
+ */
+ vmw_priv->fb_info = info;
+ par = info->par;
+ par->vmw_priv = vmw_priv;
+ par->depth = fb_depth;
+ par->bpp = fb_bpp;
+ par->vmalloc = NULL;
+ par->max_width = fb_width;
+ par->max_height = fb_height;
+
+ /*
+ * Create buffers and alloc memory
+ */
+ par->vmalloc = vmalloc(fb_size);
+ if (unlikely(par->vmalloc == NULL)) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
+ if (unlikely(ret != 0))
+ goto err_free;
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base,
+ 0,
+ par->vmw_bo->base.num_pages,
+ &par->map);
+ if (unlikely(ret != 0))
+ goto err_unref;
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ par->bo_size = fb_size;
+
+ /*
+ * Fixed and var
+ */
+ strcpy(info->fix.id, "svgadrmfb");
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.line_length = fb_pitch;
+
+ info->fix.smem_start = 0;
+ info->fix.smem_len = fb_size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ info->pseudo_palette = par->pseudo_palette;
+ info->screen_base = par->vmalloc;
+ info->screen_size = fb_size;
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &vmw_fb_ops;
+
+ /* 24 depth per default */
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+
+ info->var.xres_virtual = fb_width;
+ info->var.yres_virtual = fb_height;
+ info->var.bits_per_pixel = par->bpp;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+
+ info->var.xres = initial_width;
+ info->var.yres = initial_height;
+
+ /* No pixmap acceleration for fbcon; a 64 KiB pixmap could be
+ * enabled here if software drawing ever needs it. */
+ info->pixmap.size = 0;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+
+ info->aperture_base = vmw_priv->vram_start;
+ info->aperture_size = vmw_priv->vram_size;
+
+ /*
+ * Dirty & Deferred IO
+ */
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ par->dirty.active = true;
+ spin_lock_init(&par->dirty.lock);
+ info->fbdefio = &vmw_defio;
+ fb_deferred_io_init(info);
+
+ ret = register_framebuffer(info);
+ if (unlikely(ret != 0))
+ goto err_defio;
+
+ return 0;
+
+err_defio:
+ fb_deferred_io_cleanup(info);
+ ttm_bo_kunmap(&par->map);
+err_unref:
+ ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
+err_free:
+ vfree(par->vmalloc);
+ framebuffer_release(info);
+ vmw_priv->fb_info = NULL;
+
+ return ret;
+}
+
+int vmw_fb_close(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ struct ttm_buffer_object *bo;
+
+ if (!vmw_priv->fb_info)
+ return 0;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+ bo = &par->vmw_bo->base;
+ par->vmw_bo = NULL;
+
+ /* Flush and stop deferred I/O before unregistering. */
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+
+ ttm_bo_kunmap(&par->map);
+ ttm_bo_unref(&bo);
+
+ vfree(par->vmalloc);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ int ret = 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret = 0;
+
+ ne_placement.lpfn = bo->num_pages;
+
+ /* interruptible? */
+ ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&vmw_priv->active_master->lock);
+
+ return ret;
+}
+
+int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ unsigned long flags;
+
+ if (!vmw_priv->fb_info)
+ return -EINVAL;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.active = false;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+ flush_scheduled_work();
+
+ par->bo_ptr = NULL;
+ ttm_bo_kunmap(&par->map);
+
+ vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
+
+ return 0;
+}
+
+int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ unsigned long flags;
+ bool dummy;
+ int ret;
+
+ if (!vmw_priv->fb_info)
+ return -EINVAL;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+
+ /* we are already active */
+ if (par->bo_ptr != NULL)
+ return 0;
+
+ /* Make sure that all overlays are stopped when we take over */
+ vmw_overlay_stop_all(vmw_priv);
+
+ ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("could not move buffer to start of VRAM\n");
+ goto err_no_buffer;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base,
+ 0,
+ par->vmw_bo->base.num_pages,
+ &par->map);
+ BUG_ON(ret != 0);
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.active = true;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+err_no_buffer:
+ vmw_fb_set_par(info);
+
+ vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
+
+ /* If something was dirty already, vmw_fb_dirty_mark() won't have
+ * scheduled new work, so schedule a run immediately. */
+ schedule_delayed_work(&info->deferred_work, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 00000000000..39d43a01d84
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,538 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "drmP.h"
+#include "ttm/ttm_placement.h"
+
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t fifo_min, hwversion;
+
+ fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
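+ /*
+ * FIFO registers occupy the area below SVGA_FIFO_MIN; the
+ * 3D_HWVERSION slot only exists if that area extends past it.
+ */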
+ if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+ return false;
+
+ hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ if (hwversion == 0)
+ return false;
+
+ if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+ return false;
+
+ return true;
+}
+
+int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max;
+ uint32_t min;
+ uint32_t dummy;
+ int ret;
+
+ fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->static_buffer = vmalloc(fifo->static_buffer_size);
+ if (unlikely(fifo->static_buffer == NULL))
+ return -ENOMEM;
+
+ fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->last_data_size = 0;
+ fifo->last_buffer_add = false;
+ fifo->last_buffer = vmalloc(fifo->last_buffer_size);
+ if (unlikely(fifo->last_buffer == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ fifo->dynamic_buffer = NULL;
+ fifo->reserved_size = 0;
+ fifo->using_bounce_buffer = false;
+
+ mutex_init(&fifo->fifo_mutex);
+ init_rwsem(&fifo->rwsem);
+
+ /*
+ * Allow mapping the first page read-only to user-space.
+ */
+
+ DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+ DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+ DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+ mutex_lock(&dev_priv->hw_mutex);
+ dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+ dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+ min = 4;
+ if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
+ min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
+ min <<= 2;
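+ /* SVGA_REG_MEM_REGS counts 32-bit FIFO registers; the shift above
+ * turns that count into the byte offset where command data may
+ * start. */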
+
+ if (min < PAGE_SIZE)
+ min = PAGE_SIZE;
+
+ iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
+ iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+ wmb();
+ iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
+ iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+ mb();
+
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+
+ DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
+ (unsigned int) max,
+ (unsigned int) min,
+ (unsigned int) fifo->capabilities);
+
+ atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
+ iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
+
+ return vmw_fifo_send_fence(dev_priv, &dummy);
+out_err:
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ return ret;
+}
+
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
+ iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+ vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+ }
+
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
+ dev_priv->config_done_state);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ dev_priv->enable_state);
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (likely(fifo->last_buffer != NULL)) {
+ vfree(fifo->last_buffer);
+ fifo->last_buffer = NULL;
+ }
+
+ if (likely(fifo->static_buffer != NULL)) {
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ }
+
+ if (likely(fifo->dynamic_buffer != NULL)) {
+ vfree(fifo->dynamic_buffer);
+ fifo->dynamic_buffer = NULL;
+ }
+}
+
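+/*
+ * The command area between SVGA_FIFO_MIN and SVGA_FIFO_MAX is a ring:
+ * the guest advances NEXT_CMD as it writes commands and the device
+ * advances STOP as it consumes them.  vmw_fifo_is_full() reports
+ * whether fewer than @bytes of that ring remain free.
+ */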
+static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+
+ return ((max - next_cmd) + (stop - min) <= bytes);
+}
+
+static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+ uint32_t bytes, bool interruptible,
+ unsigned long timeout)
+{
+ int ret = 0;
+ unsigned long end_jiffies = jiffies + timeout;
+ DEFINE_WAIT(__wait);
+
+ DRM_INFO("Fifo wait noirq.\n");
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->fifo_queue, &__wait,
+ (interruptible) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (!vmw_fifo_is_full(dev_priv, bytes))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ ret = -EBUSY;
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+ schedule_timeout(1);
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&dev_priv->fifo_queue, &__wait);
+ wake_up_all(&dev_priv->fifo_queue);
+ DRM_INFO("Fifo noirq exit.\n");
+ return ret;
+}
+
+static int vmw_fifo_wait(struct vmw_private *dev_priv,
+ uint32_t bytes, bool interruptible,
+ unsigned long timeout)
+{
+ long ret = 1L;
+ unsigned long irq_flags;
+
+ if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return vmw_fifo_wait_noirq(dev_priv, bytes,
+ interruptible, timeout);
+
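+ /*
+ * Unmask the FIFO_PROGRESS interrupt while there are waiters;
+ * the last waiter out masks it again below.
+ */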
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_FIFO_PROGRESS,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_FIFO_PROGRESS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (dev_priv->fifo_queue,
+ !vmw_fifo_is_full(dev_priv, bytes), timeout);
+ else
+ ret = wait_event_timeout
+ (dev_priv->fifo_queue,
+ !vmw_fifo_is_full(dev_priv, bytes), timeout);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_FIFO_PROGRESS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ret;
+}
+
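+/*
+ * Reserve @bytes of command space.  On success the returned pointer
+ * aims either directly into the FIFO or at a bounce buffer, and
+ * fifo_mutex stays held until the matching vmw_fifo_commit().  Typical
+ * caller pattern (a sketch mirroring the users elsewhere in this
+ * patch):
+ *
+ *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ *	if (unlikely(cmd == NULL))
+ *		return -ENOMEM;
+ *	... fill in *cmd ...
+ *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ */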
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max;
+ uint32_t min;
+ uint32_t next_cmd;
+ uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+ int ret;
+
+ mutex_lock(&fifo_state->fifo_mutex);
+ max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+
+ if (unlikely(bytes >= (max - min)))
+ goto out_err;
+
+ BUG_ON(fifo_state->reserved_size != 0);
+ BUG_ON(fifo_state->dynamic_buffer != NULL);
+
+ fifo_state->reserved_size = bytes;
+
+ while (1) {
+ uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+ bool need_bounce = false;
+ bool reserve_in_place = false;
+
+ if (next_cmd >= stop) {
+ if (likely((next_cmd + bytes < max ||
+ (next_cmd + bytes == max && stop > min))))
+ reserve_in_place = true;
+
+ else if (vmw_fifo_is_full(dev_priv, bytes)) {
+ ret = vmw_fifo_wait(dev_priv, bytes,
+ false, 3 * HZ);
+ if (unlikely(ret != 0))
+ goto out_err;
+ } else
+ need_bounce = true;
+
+ } else {
+
+ if (likely((next_cmd + bytes < stop)))
+ reserve_in_place = true;
+ else {
+ ret = vmw_fifo_wait(dev_priv, bytes,
+ false, 3 * HZ);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+ }
+
+ if (reserve_in_place) {
+ if (reserveable || bytes <= sizeof(uint32_t)) {
+ fifo_state->using_bounce_buffer = false;
+
+ if (reserveable)
+ iowrite32(bytes, fifo_mem +
+ SVGA_FIFO_RESERVED);
+ return fifo_mem + (next_cmd >> 2);
+ } else {
+ need_bounce = true;
+ }
+ }
+
+ if (need_bounce) {
+ fifo_state->using_bounce_buffer = true;
+ if (bytes < fifo_state->static_buffer_size)
+ return fifo_state->static_buffer;
+ else {
+ fifo_state->dynamic_buffer = vmalloc(bytes);
+ /* Bail out cleanly if the bounce buffer
+ * allocation fails. */
+ if (unlikely(fifo_state->dynamic_buffer == NULL))
+ goto out_err;
+ return fifo_state->dynamic_buffer;
+ }
+ }
+ }
+out_err:
+ fifo_state->reserved_size = 0;
+ mutex_unlock(&fifo_state->fifo_mutex);
+ return NULL;
+}
+
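+/*
+ * Copy a bounce buffer into the FIFO.  With SVGA_FIFO_CAP_RESERVE the
+ * device honours SVGA_FIFO_RESERVED, so the block can be pushed with
+ * memcpy_toio(), split at most once at the wrap point.  Without that
+ * capability, vmw_fifo_slow_copy() below must trickle the commands in
+ * one 32-bit word at a time, publishing NEXT_CMD after every word.
+ */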
+static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
+ __le32 __iomem *fifo_mem,
+ uint32_t next_cmd,
+ uint32_t max, uint32_t min, uint32_t bytes)
+{
+ uint32_t chunk_size = max - next_cmd;
+ uint32_t rest;
+ uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+ fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+ if (bytes < chunk_size)
+ chunk_size = bytes;
+
+ iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+ mb();
+ memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+ rest = bytes - chunk_size;
+ if (rest)
+ memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
+ rest);
+}
+
+static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
+ __le32 __iomem *fifo_mem,
+ uint32_t next_cmd,
+ uint32_t max, uint32_t min, uint32_t bytes)
+{
+ uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+ fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+ while (bytes > 0) {
+ iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+ next_cmd += sizeof(uint32_t);
+ if (unlikely(next_cmd == max))
+ next_cmd = min;
+ mb();
+ iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ mb();
+ bytes -= sizeof(uint32_t);
+ }
+}
+
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+
+ BUG_ON((bytes & 3) != 0);
+ BUG_ON(bytes > fifo_state->reserved_size);
+
+ fifo_state->reserved_size = 0;
+
+ if (fifo_state->using_bounce_buffer) {
+ if (reserveable)
+ vmw_fifo_res_copy(fifo_state, fifo_mem,
+ next_cmd, max, min, bytes);
+ else
+ vmw_fifo_slow_copy(fifo_state, fifo_mem,
+ next_cmd, max, min, bytes);
+
+ if (fifo_state->dynamic_buffer) {
+ vfree(fifo_state->dynamic_buffer);
+ fifo_state->dynamic_buffer = NULL;
+ }
+
+ }
+
+ down_write(&fifo_state->rwsem);
+ if (fifo_state->using_bounce_buffer || reserveable) {
+ next_cmd += bytes;
+ if (next_cmd >= max)
+ next_cmd -= max - min;
+ mb();
+ iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ }
+
+ if (reserveable)
+ iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+ mb();
+ up_write(&fifo_state->rwsem);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ mutex_unlock(&fifo_state->fifo_mutex);
+}
+
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct svga_fifo_cmd_fence *cmd_fence;
+ void *fm;
+ int ret = 0;
+ uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+
+ fm = vmw_fifo_reserve(dev_priv, bytes);
+ if (unlikely(fm == NULL)) {
+ *sequence = atomic_read(&dev_priv->fence_seq);
+ ret = -ENOMEM;
+ (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+ false, 3*HZ);
+ goto out_err;
+ }
+
+ do {
+ *sequence = atomic_add_return(1, &dev_priv->fence_seq);
+ } while (*sequence == 0);
+
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+
+ /*
+ * Don't request hardware to send a fence. The
+ * waiting code in vmwgfx_irq.c will emulate this.
+ */
+
+ vmw_fifo_commit(dev_priv, 0);
+ return 0;
+ }
+
+ *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
+ cmd_fence = (struct svga_fifo_cmd_fence *)
+ ((unsigned long)fm + sizeof(__le32));
+
+ iowrite32(*sequence, &cmd_fence->fence);
+ fifo_state->last_buffer_add = true;
+ vmw_fifo_commit(dev_priv, bytes);
+ fifo_state->last_buffer_add = false;
+
+out_err:
+ return ret;
+}
+
+/**
+ * Map the first page of the FIFO read-only to user-space.
+ */
+
+static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int ret;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+
+ if (address != vma->vm_start)
+ return VM_FAULT_SIGBUS;
+
+ ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
+ if (likely(ret == -EBUSY || ret == 0))
+ return VM_FAULT_NOPAGE;
+ else if (ret == -ENOMEM)
+ return VM_FAULT_OOM;
+
+ return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct vmw_fifo_vm_ops = {
+ .fault = vmw_fifo_vm_fault,
+ .open = NULL,
+ .close = NULL
+};
+
+int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
+ file_priv = (struct drm_file *)filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+
+ if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
+ (vma->vm_end - vma->vm_start) != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
+ vma->vm_page_prot);
+ vma->vm_ops = &vmw_fifo_vm_ops;
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 00000000000..5f8908a5d7f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,213 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "drmP.h"
+#include "ttm/ttm_bo_driver.h"
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
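+/*
+ * A GMR is described to the device by a chain of guest pages holding
+ * {ppn, num_pages} descriptors.  Runs of physically contiguous pages
+ * collapse into a single descriptor, the last slot of each descriptor
+ * page points to the next page in the chain, and a descriptor with
+ * num_pages == 0 terminates the list.
+ */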
+static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
+ struct page *pages[],
+ unsigned long num_pages)
+{
+ struct page *page, *next;
+ struct svga_guest_mem_descriptor *page_virtual = NULL;
+ struct svga_guest_mem_descriptor *desc_virtual = NULL;
+ unsigned int desc_per_page;
+ unsigned long prev_pfn;
+ unsigned long pfn;
+ int ret;
+
+ desc_per_page = PAGE_SIZE /
+ sizeof(struct svga_guest_mem_descriptor) - 1;
+
+ while (likely(num_pages != 0)) {
+ page = alloc_page(__GFP_HIGHMEM);
+ if (unlikely(page == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ list_add_tail(&page->lru, desc_pages);
+
+ /*
+ * Point previous page terminating descriptor to this
+ * page before unmapping it.
+ */
+
+ if (likely(page_virtual != NULL)) {
+ desc_virtual->ppn = page_to_pfn(page);
+ kunmap_atomic(page_virtual, KM_USER0);
+ }
+
+ page_virtual = kmap_atomic(page, KM_USER0);
+ desc_virtual = page_virtual - 1;
+ prev_pfn = ~(0UL);
+
+ while (likely(num_pages != 0)) {
+ pfn = page_to_pfn(*pages);
+
+ if (pfn != prev_pfn + 1) {
+
+ if (desc_virtual - page_virtual ==
+ desc_per_page - 1)
+ break;
+
+ (++desc_virtual)->ppn = cpu_to_le32(pfn);
+ desc_virtual->num_pages = cpu_to_le32(1);
+ } else {
+ uint32_t tmp =
+ le32_to_cpu(desc_virtual->num_pages);
+ desc_virtual->num_pages = cpu_to_le32(tmp + 1);
+ }
+ prev_pfn = pfn;
+ --num_pages;
+ ++pages;
+ }
+
+ (++desc_virtual)->ppn = cpu_to_le32(0);
+ desc_virtual->num_pages = cpu_to_le32(0);
+ }
+
+ if (likely(page_virtual != NULL))
+ kunmap_atomic(page_virtual, KM_USER0);
+
+ return 0;
+out_err:
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
+ return ret;
+}
+
+static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
+}
+
+static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
+ int gmr_id, struct list_head *desc_pages)
+{
+ struct page *page;
+
+ if (unlikely(list_empty(desc_pages)))
+ return;
+
+ page = list_entry(desc_pages->next, struct page, lru);
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ wmb();
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+ mb();
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+}
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
+static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
+ unsigned long num_pages)
+{
+ unsigned long prev_pfn = ~(0UL);
+ unsigned long pfn;
+ unsigned long descriptors = 0;
+
+ while (num_pages--) {
+ pfn = page_to_pfn(*pages++);
+ if (prev_pfn + 1 != pfn)
+ ++descriptors;
+ prev_pfn = pfn;
+ }
+
+ return descriptors;
+}
+
+int vmw_gmr_bind(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ unsigned long descriptors;
+ int ret;
+ uint32_t id;
+ struct list_head desc_pages;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+ return -EINVAL;
+
+ ret = ttm_tt_populate(ttm);
+ if (unlikely(ret != 0))
+ return ret;
+
+ descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
+ if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&desc_pages);
+ ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
+ ttm->num_pages);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_gmr_id_alloc(dev_priv, &id);
+ if (unlikely(ret != 0))
+ goto out_no_id;
+
+ vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+ vmw_gmr_free_descriptors(&desc_pages);
+ vmw_dmabuf_set_gmr(bo, id);
+ return 0;
+
+out_no_id:
+ vmw_gmr_free_descriptors(&desc_pages);
+ return ret;
+}
+
+void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ wmb();
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
+ mb();
+ mutex_unlock(&dev_priv->hw_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 00000000000..1c7a316454d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,87 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_drm.h"
+
+int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_getparam_arg *param =
+ (struct drm_vmw_getparam_arg *)data;
+
+ switch (param->param) {
+ case DRM_VMW_PARAM_NUM_STREAMS:
+ param->value = vmw_overlay_num_overlays(dev_priv);
+ break;
+ case DRM_VMW_PARAM_NUM_FREE_STREAMS:
+ param->value = vmw_overlay_num_free_overlays(dev_priv);
+ break;
+ case DRM_VMW_PARAM_3D:
+ param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+ break;
+ case DRM_VMW_PARAM_FIFO_OFFSET:
+ param->value = dev_priv->mmio_start;
+ break;
+ case DRM_VMW_PARAM_HW_CAPS:
+ param->value = dev_priv->capabilities;
+ break;
+ case DRM_VMW_PARAM_FIFO_CAPS:
+ param->value = dev_priv->fifo.capabilities;
+ break;
+ default:
+ DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+ param->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct drm_vmw_fifo_debug_arg *arg =
+ (struct drm_vmw_fifo_debug_arg *)data;
+ __le32 __user *buffer = (__le32 __user *)
+ (unsigned long)arg->debug_buffer;
+
+ if (unlikely(fifo_state->last_buffer == NULL))
+ return -EINVAL;
+
+ if (arg->debug_buffer_size < fifo_state->last_data_size) {
+ arg->used_size = arg->debug_buffer_size;
+ arg->did_not_fit = 1;
+ } else {
+ arg->used_size = fifo_state->last_data_size;
+ arg->did_not_fit = 0;
+ }
+
+ if (copy_to_user(buffer, fifo_state->last_buffer, arg->used_size))
+ return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 00000000000..4d7cb539386
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,286 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 24)
+
+irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ spin_lock(&dev_priv->irq_lock);
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ spin_unlock(&dev_priv->irq_lock);
+
+ if (status & SVGA_IRQFLAG_ANY_FENCE)
+ wake_up_all(&dev_priv->fence_queue);
+ if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
+ wake_up_all(&dev_priv->fifo_queue);
+
+ if (likely(status)) {
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+{
+ uint32_t busy;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ busy = vmw_read(dev_priv, SVGA_REG_BUSY);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return (busy == 0);
+}
+
+
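+/*
+ * Fence sequences are 32-bit and wrap.  Unsigned subtraction keeps
+ * (last_read_sequence - sequence) small for any recently emitted
+ * fence, so the comparisons against VMW_FENCE_WRAP below stay correct
+ * across wraparound.
+ */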
+bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ struct vmw_fifo_state *fifo_state;
+ bool ret;
+
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return true;
+
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return true;
+
+ fifo_state = &dev_priv->fifo;
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+ vmw_fifo_idle(dev_priv, sequence))
+ return true;
+
+ /**
+ * Finally, check whether the sequence is ahead of what has
+ * actually been emitted; such a fence is stale and counts as
+ * signaled.
+ */
+
+ ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+ > VMW_FENCE_WRAP);
+
+ return ret;
+}
+
+int vmw_fallback_wait(struct vmw_private *dev_priv,
+ bool lazy,
+ bool fifo_idle,
+ uint32_t sequence,
+ bool interruptible,
+ unsigned long timeout)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+
+ uint32_t count = 0;
+ uint32_t signal_seq;
+ int ret;
+ unsigned long end_jiffies = jiffies + timeout;
+ bool (*wait_condition)(struct vmw_private *, uint32_t);
+ DEFINE_WAIT(__wait);
+
+ wait_condition = (fifo_idle) ? &vmw_fifo_idle :
+ &vmw_fence_signaled;
+
+ /**
+ * Block command submission while waiting for idle.
+ */
+
+ if (fifo_idle)
+ down_read(&fifo_state->rwsem);
+ signal_seq = atomic_read(&dev_priv->fence_seq);
+ ret = 0;
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->fence_queue, &__wait,
+ (interruptible) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (wait_condition(dev_priv, sequence))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+ if (lazy)
+ schedule_timeout(1);
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hrtimeout() here for
+ * newer kernels and lower CPU utilization.
+ */
+
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ __set_current_state((interruptible) ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ }
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&dev_priv->fence_queue, &__wait);
+ if (ret == 0 && fifo_idle) {
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+ }
+ wake_up_all(&dev_priv->fence_queue);
+ if (fifo_idle)
+ up_read(&fifo_state->rwsem);
+
+ return ret;
+}
+
+int vmw_wait_fence(struct vmw_private *dev_priv,
+ bool lazy, uint32_t sequence,
+ bool interruptible, unsigned long timeout)
+{
+ long ret;
+ unsigned long irq_flags;
+ struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return 0;
+
+ if (likely(vmw_fence_signaled(dev_priv, sequence)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+ if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
+ return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+ interruptible, timeout);
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+ interruptible, timeout);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_ANY_FENCE,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (dev_priv->fence_queue,
+ vmw_fence_signaled(dev_priv, sequence),
+ timeout);
+ else
+ ret = wait_event_timeout
+ (dev_priv->fence_queue,
+ vmw_fence_signaled(dev_priv, sequence),
+ timeout);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ret;
+}
+
+void vmw_irq_preinstall(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return;
+
+ spin_lock_init(&dev_priv->irq_lock);
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+int vmw_irq_postinstall(struct drm_device *dev)
+{
+ return 0;
+}
+
+void vmw_irq_uninstall(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
+
+int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_wait_arg *arg =
+ (struct drm_vmw_fence_wait_arg *)data;
+ unsigned long timeout;
+
+ if (!arg->cookie_valid) {
+ arg->cookie_valid = 1;
+ arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
+ }
+
+ timeout = jiffies;
+ if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
+ return -EBUSY;
+
+ timeout = (unsigned long)arg->kernel_cookie - timeout;
+ return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 00000000000..31f9afed0a6
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,880 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+/* Might need a hrtimer here? */
+#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
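+/* Full-screen presents are rate-limited to VMWGFX_PRESENT_RATE by the
+ * delayed work in vmw_framebuffer_present_fs_callback() below. */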
+
+
+void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+{
+ if (du->cursor_surface)
+ vmw_surface_unreference(&du->cursor_surface);
+ if (du->cursor_dmabuf)
+ vmw_dmabuf_unreference(&du->cursor_dmabuf);
+ drm_crtc_cleanup(&du->crtc);
+ drm_encoder_cleanup(&du->encoder);
+ drm_connector_cleanup(&du->connector);
+}
+
+/*
+ * Display Unit Cursor functions
+ */
+
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+ } *cmd;
+ u32 image_size = width * height * 4;
+ u32 cmd_size = sizeof(*cmd) + image_size;
+
+ if (!image)
+ return -EINVAL;
+
+ cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
+ cmd->cursor.id = cpu_to_le32(0);
+ cmd->cursor.width = cpu_to_le32(width);
+ cmd->cursor.height = cpu_to_le32(height);
+ cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
+ cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+
+ vmw_fifo_commit(dev_priv, cmd_size);
+
+ return 0;
+}
+
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t count;
+
+ iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+ iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+ iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
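+ /* Bumping CURSOR_COUNT tells the device to latch the state written
+ * above. */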
+ count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+ iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+}
+
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *dmabuf = NULL;
+ int ret;
+
+ if (handle) {
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ handle, &surface);
+ if (!ret) {
+ if (!surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ } else {
+ ret = vmw_user_dmabuf_lookup(tfile,
+ handle, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* takedown old cursor */
+ if (du->cursor_surface) {
+ du->cursor_surface->snooper.crtc = NULL;
+ vmw_surface_unreference(&du->cursor_surface);
+ }
+ if (du->cursor_dmabuf)
+ vmw_dmabuf_unreference(&du->cursor_dmabuf);
+
+ /* setup new image */
+ if (surface) {
+ /* vmw_user_surface_lookup takes one reference */
+ du->cursor_surface = surface;
+
+ du->cursor_surface->snooper.crtc = crtc;
+ du->cursor_age = du->cursor_surface->snooper.age;
+ vmw_cursor_update_image(dev_priv, surface->snooper.image,
+ 64, 64, du->hotspot_x, du->hotspot_y);
+ } else if (dmabuf) {
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+
+ /* vmw_user_surface_lookup takes one reference */
+ du->cursor_dmabuf = dmabuf;
+
+ kmap_offset = 0;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ vmw_cursor_update_image(dev_priv, virtual, 64, 64,
+ du->hotspot_x, du->hotspot_y);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ } else {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return 0;
+ }
+
+ vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
+
+ return 0;
+}
+
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ bool shown = du->cursor_surface || du->cursor_dmabuf;
+
+ du->cursor_x = x + crtc->x;
+ du->cursor_y = y + crtc->y;
+
+ vmw_cursor_update_position(dev_priv, shown,
+ du->cursor_x, du->cursor_y);
+
+ return 0;
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ unsigned box_count;
+ void *virtual;
+ bool dummy;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.pitch != (64 * 4) ||
+ cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->w != 64 || box->h != 64 || box->d != 1 ||
+ box_count != 1) {
+ /* TODO handle non-page-aligned offsets */
+ /* TODO handle partial uploads and pitch != 256 */
+ /* TODO handle more than one copy box (size != 64) */
+ DRM_ERROR("Unsupported cursor DMA layout.\n");
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+
+ memcpy(srf->snooper.image, virtual, 64*64*4);
+ srf->snooper.age++;
+
+ /* we can't call this function from this function since execbuf has
+ * reserved fifo space.
+ *
+ * if (srf->snooper.crtc)
+ * vmw_ldu_crtc_cursor_update_image(dev_priv,
+ * srf->snooper.image, 64, 64,
+ * du->hotspot_x, du->hotspot_y);
+ */
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_display_unit *du;
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ if (!du->cursor_surface ||
+ du->cursor_age == du->cursor_surface->snooper.age)
+ continue;
+
+ du->cursor_age = du->cursor_surface->snooper.age;
+ vmw_cursor_update_image(dev_priv,
+ du->cursor_surface->snooper.image,
+ 64, 64, du->hotspot_x, du->hotspot_y);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/*
+ * Generic framebuffer code
+ */
+
+int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ if (handle)
+ *handle = 0;
+
+ return 0;
+}
+
+/*
+ * Surface framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbs(x) \
+ container_of(x, struct vmw_framebuffer_surface, base.base)
+
+struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+ struct vmw_surface *surface;
+ struct delayed_work d_work;
+ struct mutex work_lock;
+ bool present_fs;
+};
+
+void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct vmw_framebuffer_surface *vfb =
+ vmw_framebuffer_to_vfbs(framebuffer);
+
+ cancel_delayed_work_sync(&vfb->d_work);
+ drm_framebuffer_cleanup(framebuffer);
+ vmw_surface_unreference(&vfb->surface);
+
+ kfree(framebuffer);
+}
+
+static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
+{
+ struct delayed_work *d_work =
+ container_of(work, struct delayed_work, work);
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(d_work, struct vmw_framebuffer_surface, d_work);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_framebuffer *framebuffer = &vfbs->base.base;
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
+
+ mutex_lock(&vfbs->work_lock);
+ if (!vfbs->present_fs)
+ goto out_unlock;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
+ goto out_resched;
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+ cmd->cr.x = cpu_to_le32(0);
+ cmd->cr.y = cpu_to_le32(0);
+ cmd->cr.srcx = cmd->cr.x;
+ cmd->cr.srcy = cmd->cr.y;
+ cmd->cr.w = cpu_to_le32(framebuffer->width);
+ cmd->cr.h = cpu_to_le32(framebuffer->height);
+ vfbs->present_fs = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+out_resched:
+ /**
+ * Will not re-add if already pending.
+ */
+ schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+out_unlock:
+ mutex_unlock(&vfbs->work_lock);
+}
+
+
+int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_clip_rect norect;
+ SVGA3dCopyRect *cr;
+ int i, inc = 1;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
+
+ if (!num_clips ||
+ !(dev_priv->fifo.capabilities &
+ SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+ int ret;
+
+ mutex_lock(&vfbs->work_lock);
+ vfbs->present_fs = true;
+ ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+ mutex_unlock(&vfbs->work_lock);
+ if (ret) {
+ /**
+ * No work was pending; present immediately.
+ */
+ vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
+ }
+ return 0;
+ }
+
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ inc = 2; /* skip source rects */
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+
+ for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
+ cr->x = cpu_to_le16(clips->x1);
+ cr->y = cpu_to_le16(clips->y1);
+ cr->srcx = cr->x;
+ cr->srcy = cr->y;
+ cr->w = cpu_to_le16(clips->x2 - clips->x1);
+ cr->h = cpu_to_le16(clips->y2 - clips->y1);
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+ .destroy = vmw_framebuffer_surface_destroy,
+ .dirty = vmw_framebuffer_surface_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ struct vmw_surface *surface,
+ struct vmw_framebuffer **out,
+ unsigned width, unsigned height)
+
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_framebuffer_surface *vfbs;
+ int ret;
+
+ vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
+ if (!vfbs) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = drm_framebuffer_init(dev, &vfbs->base.base,
+ &vmw_framebuffer_surface_funcs);
+ if (ret)
+ goto out_err2;
+
+ if (!vmw_surface_reference(surface)) {
+ DRM_ERROR("failed to reference surface %p\n", surface);
+ goto out_err3;
+ }
+
+ /* XXX get the first 3 from the surface info */
+ vfbs->base.base.bits_per_pixel = 32;
+ vfbs->base.base.pitch = width * 32 / 8;
+ vfbs->base.base.depth = 24;
+ vfbs->base.base.width = width;
+ vfbs->base.base.height = height;
+ vfbs->base.pin = NULL;
+ vfbs->base.unpin = NULL;
+ vfbs->surface = surface;
+ mutex_init(&vfbs->work_lock);
+ INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+ *out = &vfbs->base;
+
+ return 0;
+
+out_err3:
+ drm_framebuffer_cleanup(&vfbs->base.base);
+out_err2:
+ kfree(vfbs);
+out_err1:
+ return ret;
+}
+
+/*
+ * Dmabuf framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbd(x) \
+ container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+
+struct vmw_framebuffer_dmabuf {
+ struct vmw_framebuffer base;
+ struct vmw_dma_buffer *buffer;
+};
+
+void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(framebuffer);
+
+ drm_framebuffer_cleanup(framebuffer);
+ vmw_dmabuf_unreference(&vfbd->buffer);
+
+ kfree(vfbd);
+}
+
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct drm_clip_rect norect;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+ int i, increment = 1;
+
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ increment = 2;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_clips; i++, clips += increment) {
+ cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
+ cmd[i].body.x = cpu_to_le32(clips->x1);
+ cmd[i].body.y = cpu_to_le32(clips->y1);
+ cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
+ cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+ .destroy = vmw_framebuffer_dmabuf_destroy,
+ .dirty = vmw_framebuffer_dmabuf_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+ int ret;
+
+ vmw_overlay_pause_all(dev_priv);
+
+ ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
+
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+ vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
+ vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
+ vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
+ vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
+ vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+ } else {
+ WARN_ON(true);
+ }
+
+ vmw_overlay_resume_all(dev_priv);
+
+ return ret;
+}
+
+static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+
+ if (!vfbd->buffer) {
+ WARN_ON(!vfbd->buffer);
+ return 0;
+ }
+
+ return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
+}
+
+int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_framebuffer **out,
+ unsigned width, unsigned height)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_framebuffer_dmabuf *vfbd;
+ int ret;
+
+ vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
+ if (!vfbd) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = drm_framebuffer_init(dev, &vfbd->base.base,
+ &vmw_framebuffer_dmabuf_funcs);
+ if (ret)
+ goto out_err2;
+
+ if (!vmw_dmabuf_reference(dmabuf)) {
+ DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
+ ret = -EINVAL;
+ goto out_err3;
+ }
+
+ /* XXX get the first 3 from the buffer info */
+ vfbd->base.base.bits_per_pixel = 32;
+ vfbd->base.base.pitch = width * 32 / 8; /* bytes per scanline at 32 bpp */
+ vfbd->base.base.depth = 24;
+ vfbd->base.base.width = width;
+ vfbd->base.base.height = height;
+ vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+ vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ vfbd->buffer = dmabuf;
+ *out = &vfbd->base;
+
+ return 0;
+
+out_err3:
+ drm_framebuffer_cleanup(&vfbd->base.base);
+out_err2:
+ kfree(vfbd);
+out_err1:
+ return ret;
+}
+
+/*
+ * Generic Kernel modesetting functions
+ */
+
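+/*
+ * Create a framebuffer from a user handle. The handle is first looked
+ * up as a surface; if that fails we fall back to treating it as a
+ * dmabuf. In both cases the lookup reference is dropped again, since
+ * the new framebuffer takes a reference of its own.
+ */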
+static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_framebuffer *vfb = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *bo = NULL;
+ int ret;
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle, &surface);
+ if (ret)
+ goto try_dmabuf;
+
+ if (!surface->scanout)
+ goto err_not_scanout;
+
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd->width, mode_cmd->height);
+
+ /* vmw_user_surface_lookup takes one ref, and so does new_fb */
+ vmw_surface_unreference(&surface);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return NULL;
+ }
+ return &vfb->base;
+
+try_dmabuf:
+ DRM_INFO("%s: trying buffer\n", __func__);
+
+ ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
+ if (ret) {
+ DRM_ERROR("failed to find buffer: %i\n", ret);
+ return NULL;
+ }
+
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd->width, mode_cmd->height);
+
+ /* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */
+ vmw_dmabuf_unreference(&bo);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return NULL;
+ }
+
+ return &vfb->base;
+
+err_not_scanout:
+ DRM_ERROR("surface not marked as scanout\n");
+ /* vmw_user_surface_lookup takes one ref */
+ vmw_surface_unreference(&surface);
+
+ return NULL;
+}
+
+static int vmw_kms_fb_changed(struct drm_device *dev)
+{
+ return 0;
+}
+
+static struct drm_mode_config_funcs vmw_kms_funcs = {
+ .fb_create = vmw_kms_fb_create,
+ .fb_changed = vmw_kms_fb_changed,
+};
+
+int vmw_kms_init(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = &vmw_kms_funcs;
+ dev->mode_config.min_width = 1;
+ dev->mode_config.min_height = 1;
+ dev->mode_config.max_width = dev_priv->fb_max_width;
+ dev->mode_config.max_height = dev_priv->fb_max_height;
+
+ ret = vmw_kms_init_legacy_display_system(dev_priv);
+
+ return ret;
+}
+
+int vmw_kms_close(struct vmw_private *dev_priv)
+{
+ /*
+ * Docs say we should take the lock before calling this function,
+ * but since it destroys encoders and our destructor calls
+ * drm_encoder_cleanup, which takes the lock, we would deadlock.
+ */
+ drm_mode_config_cleanup(dev_priv->dev);
+ vmw_kms_close_legacy_display_system(dev_priv);
+ return 0;
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ du->hotspot_x = arg->xhot;
+ du->hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = obj_to_crtc(obj);
+ du = vmw_crtc_to_du(crtc);
+
+ du->hotspot_x = arg->xhot;
+ du->hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
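+/*
+ * Save the legacy VGA/SVGA register state so vmw_kms_restore_vga can
+ * hand the device back to the console the way we found it.
+ */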
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
+ /*
+ * Set up a single multimon monitor with a size of 0x0.
+ * This stops the UI from resizing when we change the
+ * framebuffer size.
+ */
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+ vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
+ vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
+ vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
+ vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
+ vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
+ vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+
+ return 0;
+}
+
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
+{
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
+ vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
+
+ /* TODO check for multimon */
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 00000000000..8b95249f053
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_KMS_H_
+#define VMWGFX_KMS_H_
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+
+#define vmw_framebuffer_to_vfb(x) \
+ container_of(x, struct vmw_framebuffer, base)
+
+/**
+ * Base class for framebuffers
+ *
+ * @pin is called whenever a crtc uses this framebuffer
+ * @unpin is called when no crtc is using it any longer
+ */
+struct vmw_framebuffer {
+ struct drm_framebuffer base;
+ int (*pin)(struct vmw_framebuffer *fb);
+ int (*unpin)(struct vmw_framebuffer *fb);
+};
+
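+/*
+ * A minimal sketch of how the hooks above are driven (the legacy
+ * display unit in vmwgfx_ldu.c does this when the first crtc starts,
+ * or the last crtc stops, scanning out of a framebuffer):
+ *
+ *     if (vfb->pin)
+ *             vfb->pin(vfb);
+ *     ...
+ *     if (vfb->unpin)
+ *             vfb->unpin(vfb);
+ */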
+
+#define vmw_crtc_to_du(x) \
+ container_of(x, struct vmw_display_unit, crtc)
+
+/*
+ * Basic cursor manipulation
+ */
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y);
+
+/**
+ * Base class display unit.
+ *
+ * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
+ * the display unit is all of them at the same time. This is true for both
+ * legacy multimon and screen objects.
+ */
+struct vmw_display_unit {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct vmw_surface *cursor_surface;
+ struct vmw_dma_buffer *cursor_dmabuf;
+ size_t cursor_age;
+
+ int cursor_x;
+ int cursor_y;
+
+ int hotspot_x;
+ int hotspot_y;
+
+ unsigned unit;
+};
+
+/*
+ * Shared display unit functions - vmwgfx_kms.c
+ */
+void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/*
+ * Legacy display unit functions - vmwgfx_ldu.c
+ */
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 00000000000..90891593bf6
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,516 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+#define vmw_crtc_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.crtc)
+#define vmw_encoder_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.encoder)
+#define vmw_connector_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.connector)
+
+struct vmw_legacy_display {
+ struct list_head active;
+
+ unsigned num_active;
+
+ struct vmw_framebuffer *fb;
+};
+
+/**
+ * Display unit using the legacy register interface.
+ */
+struct vmw_legacy_display_unit {
+ struct vmw_display_unit base;
+
+ struct list_head active;
+
+ unsigned unit;
+};
+
+static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
+{
+ list_del_init(&ldu->active);
+ vmw_display_unit_cleanup(&ldu->base);
+ kfree(ldu);
+}
+
+
+/*
+ * Legacy Display Unit CRTC functions
+ */
+
+static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t size)
+{
+}
+
+static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
+}
+
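+/*
+ * Write the list of active display units to the device: every display
+ * id is first cleared and invalidated, then each active unit is
+ * programmed with the position and size of its crtc, unit 0 acting as
+ * the primary display.
+ */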
+static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+{
+ struct vmw_legacy_display *lds = dev_priv->ldu_priv;
+ struct vmw_legacy_display_unit *entry;
+ struct drm_crtc *crtc;
+ int i = 0;
+
+ /* to stop the screen from changing size on resize */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+ for (i = 0; i < lds->num_active; i++) {
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+ /* Now set the mode */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+ i = 0;
+ list_for_each_entry(entry, &lds->active, active) {
+ crtc = &entry->base.crtc;
+
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
+ struct vmw_legacy_display_unit *ldu)
+{
+ struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+ if (list_empty(&ldu->active))
+ return 0;
+
+ list_del_init(&ldu->active);
+ if (--(ld->num_active) == 0) {
+ BUG_ON(!ld->fb);
+ if (ld->fb->unpin)
+ ld->fb->unpin(ld->fb);
+ ld->fb = NULL;
+ }
+
+ return 0;
+}
+
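+/*
+ * Add a unit to the active list, which is kept sorted by unit number.
+ * The shared framebuffer is pinned when the first unit goes active;
+ * vmw_ldu_del_active unpins it again when the last one goes away.
+ */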
+static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
+ struct vmw_legacy_display_unit *ldu,
+ struct vmw_framebuffer *vfb)
+{
+ struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+ struct vmw_legacy_display_unit *entry;
+ struct list_head *at;
+
+ if (!list_empty(&ldu->active))
+ return 0;
+
+ at = &ld->active;
+ list_for_each_entry(entry, &ld->active, active) {
+ if (entry->unit > ldu->unit)
+ break;
+
+ at = &entry->active;
+ }
+
+ list_add(&ldu->active, at);
+ if (ld->num_active++ == 0) {
+ BUG_ON(ld->fb);
+ if (vfb->pin)
+ vfb->pin(vfb);
+ ld->fb = vfb;
+ }
+
+ return 0;
+}
+
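+/*
+ * Legacy replacement for the drm set_config helper. Only one
+ * connector per crtc and one framebuffer shared by all active units
+ * are supported; a set with no mode, no fb or no connectors turns the
+ * crtc off.
+ */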
+static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct vmw_framebuffer *vfb;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ /* get the ldu */
+ crtc = set->crtc;
+ ldu = vmw_crtc_to_ldu(crtc);
+ vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+ dev_priv = vmw_priv(crtc->dev);
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("to many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &ldu->base.connector) {
+ DRM_ERROR("connector doesn't match %p %p\n",
+ set->connectors[0], &ldu->base.connector);
+ return -EINVAL;
+ }
+
+ /* ldu only supports one fb active at a time */
+ if (dev_priv->ldu_priv->fb && vfb &&
+ dev_priv->ldu_priv->fb != vfb) {
+ DRM_ERROR("Multiple framebuffers not supported\n");
+ return -EINVAL;
+ }
+
+ /* since they always map one to one these are safe */
+ connector = &ldu->base.connector;
+ encoder = &ldu->base.encoder;
+
+ /* should we turn the crtc off? */
+ if (set->num_connectors == 0 || !set->mode || !set->fb) {
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+
+ vmw_ldu_del_active(dev_priv, ldu);
+
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
+ }
+
+
+ /* we now know we want to set a mode */
+ mode = set->mode;
+ fb = set->fb;
+
+ if (set->x + mode->hdisplay > fb->width ||
+ set->y + mode->vdisplay > fb->height) {
+ DRM_ERROR("set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ vmw_fb_off(dev_priv);
+
+ crtc->fb = fb;
+ encoder->crtc = crtc;
+ connector->encoder = encoder;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ crtc->mode = *mode;
+
+ vmw_ldu_add_active(dev_priv, ldu, vfb);
+
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
+}
+
+static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+ .save = vmw_ldu_crtc_save,
+ .restore = vmw_ldu_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_ldu_crtc_gamma_set,
+ .destroy = vmw_ldu_crtc_destroy,
+ .set_config = vmw_ldu_crtc_set_config,
+};
+
+/*
+ * Legacy Display Unit encoder functions
+ */
+
+static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+ .destroy = vmw_ldu_encoder_destroy,
+};
+
+/*
+ * Legacy Display Unit connector functions
+ */
+
+static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static void vmw_ldu_connector_save(struct drm_connector *connector)
+{
+}
+
+static void vmw_ldu_connector_restore(struct drm_connector *connector)
+{
+}
+
+static enum drm_connector_status
+ vmw_ldu_connector_detect(struct drm_connector *connector)
+{
+ /* XXX vmwctrl should control connection status */
+ if (vmw_connector_to_ldu(connector)->base.unit == 0)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
+ 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* Terminate */
+ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
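+/*
+ * There is no EDID to probe here, so the mode list is filled from the
+ * builtin table above, skipping any mode larger than the maximum
+ * framebuffer size passed in.
+ */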
+static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ int i;
+
+ for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
+ vmw_ldu_connector_builtin[i].vdisplay > max_height)
+ continue;
+
+ mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+ if (!mode)
+ return 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ drm_mode_connector_list_update(connector);
+
+ return 1;
+}
+
+static int vmw_ldu_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+{
+ vmw_ldu_destroy(vmw_connector_to_ldu(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ .dpms = vmw_ldu_connector_dpms,
+ .save = vmw_ldu_connector_save,
+ .restore = vmw_ldu_connector_restore,
+ .detect = vmw_ldu_connector_detect,
+ .fill_modes = vmw_ldu_connector_fill_modes,
+ .set_property = vmw_ldu_connector_set_property,
+ .destroy = vmw_ldu_connector_destroy,
+};
+
+static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
+ if (!ldu)
+ return -ENOMEM;
+
+ ldu->unit = unit;
+ crtc = &ldu->base.crtc;
+ encoder = &ldu->base.encoder;
+ connector = &ldu->base.connector;
+
+ drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ /* Initial status */
+ if (unit == 0)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+
+ drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ INIT_LIST_HEAD(&ldu->active);
+
+ drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+{
+ if (dev_priv->ldu_priv) {
+ DRM_INFO("ldu system already on\n");
+ return -EINVAL;
+ }
+
+ dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
+
+ if (!dev_priv->ldu_priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
+ dev_priv->ldu_priv->num_active = 0;
+ dev_priv->ldu_priv->fb = NULL;
+
+ drm_mode_create_dirty_info_property(dev_priv->dev);
+
+ vmw_ldu_init(dev_priv, 0);
+ vmw_ldu_init(dev_priv, 1);
+ vmw_ldu_init(dev_priv, 2);
+ vmw_ldu_init(dev_priv, 3);
+ vmw_ldu_init(dev_priv, 4);
+ vmw_ldu_init(dev_priv, 5);
+ vmw_ldu_init(dev_priv, 6);
+ vmw_ldu_init(dev_priv, 7);
+
+ return 0;
+}
+
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+{
+ if (!dev_priv->ldu_priv)
+ return -ENOSYS;
+
+ BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
+
+ kfree(dev_priv->ldu_priv);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 00000000000..5b6eabeb7f5
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,625 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#include "ttm/ttm_placement.h"
+
+#include "svga_overlay.h"
+#include "svga_escape.h"
+
+#define VMW_MAX_NUM_STREAMS 1
+
+struct vmw_stream {
+ struct vmw_dma_buffer *buf;
+ bool claimed;
+ bool paused;
+ struct drm_vmw_control_stream_arg saved;
+};
+
+/**
+ * Overlay control
+ */
+struct vmw_overlay {
+ /*
+ * Each stream is a single overlay. In Xv these are called ports.
+ */
+ struct mutex mutex;
+ struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
+};
+
+static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ return dev_priv ? dev_priv->overlay_priv : NULL;
+}
+
+struct vmw_escape_header {
+ uint32_t cmd;
+ SVGAFifoCmdEscape body;
+};
+
+struct vmw_escape_video_flush {
+ struct vmw_escape_header escape;
+ SVGAEscapeVideoFlush flush;
+};
+
+static inline void fill_escape(struct vmw_escape_header *header,
+ uint32_t size)
+{
+ header->cmd = SVGA_CMD_ESCAPE;
+ header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
+ header->body.size = size;
+}
+
+static inline void fill_flush(struct vmw_escape_video_flush *cmd,
+ uint32_t stream_id)
+{
+ fill_escape(&cmd->escape, sizeof(cmd->flush));
+ cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+ cmd->flush.streamId = stream_id;
+}
+
+/**
+ * Pin or unpin a buffer in vram.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin or unpin.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Takes the current master's ttm lock in read mode.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement *overlay_placement = &vmw_vram_placement;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (pin)
+ overlay_placement = &vmw_vram_ne_placement;
+
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->active_master->lock);
+
+ return ret;
+}
+
+/**
+ * Send put command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct drm_vmw_control_stream_arg *arg,
+ bool interruptible)
+{
+ struct {
+ struct vmw_escape_header escape;
+ struct {
+ struct {
+ uint32_t cmdType;
+ uint32_t streamId;
+ } header;
+ struct {
+ uint32_t registerId;
+ uint32_t value;
+ } items[SVGA_VIDEO_PITCH_3 + 1];
+ } body;
+ struct vmw_escape_video_flush flush;
+ } *cmds;
+ uint32_t offset;
+ int i, ret;
+
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
+
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = arg->stream_id;
+
+ for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
+ cmds->body.items[i].registerId = i;
+
+ offset = buf->base.offset + arg->offset;
+
+ cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
+ cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
+ cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
+ cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
+ cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
+ cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
+ cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
+ cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
+ cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
+ cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
+ cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
+ cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
+ cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
+ cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
+ cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
+ cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
+ cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
+ cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
+ cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
+
+ fill_flush(&cmds->flush, arg->stream_id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+ return 0;
+}
+
+/**
+ * Send stop command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
+ uint32_t stream_id,
+ bool interruptible)
+{
+ struct {
+ struct vmw_escape_header escape;
+ SVGAEscapeVideoSetRegs body;
+ struct vmw_escape_video_flush flush;
+ } *cmds;
+ int ret;
+
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
+
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = stream_id;
+ cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
+ cmds->body.items[0].value = false;
+ fill_flush(&cmds->flush, stream_id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+ return 0;
+}
+
+/**
+ * Stop or pause a stream.
+ *
+ * If the stream is paused, the no-evict flag is removed from the
+ * buffer but the buffer is left in vram. This allows, for instance,
+ * mode_set to evict it should it need to.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * @stream_id which stream to stop/pause.
+ * @pause true to pause, false to stop completely.
+ */
+static int vmw_overlay_stop(struct vmw_private *dev_priv,
+ uint32_t stream_id, bool pause,
+ bool interruptible)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct vmw_stream *stream = &overlay->stream[stream_id];
+ int ret;
+
+ /* no buffer attached, the stream is completely stopped */
+ if (!stream->buf)
+ return 0;
+
+ /* If the stream is paused this is already done */
+ if (!stream->paused) {
+ ret = vmw_overlay_send_stop(dev_priv, stream_id,
+ interruptible);
+ if (ret)
+ return ret;
+
+ /* We just remove the NO_EVICT flag so no -ENOMEM */
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
+ interruptible);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ if (!pause) {
+ vmw_dmabuf_unreference(&stream->buf);
+ stream->paused = false;
+ } else {
+ stream->paused = true;
+ }
+
+ return 0;
+}
+
+/**
+ * Update a stream and send any put or stop fifo commands needed.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * Returns
+ * -ENOMEM if buffer doesn't fit in vram.
+ * -ERESTARTSYS if interrupted.
+ */
+static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct drm_vmw_control_stream_arg *arg,
+ bool interruptible)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct vmw_stream *stream = &overlay->stream[arg->stream_id];
+ int ret = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
+ stream->buf, buf, stream->paused ? "" : "not ");
+
+ if (stream->buf != buf) {
+ ret = vmw_overlay_stop(dev_priv, arg->stream_id,
+ false, interruptible);
+ if (ret)
+ return ret;
+ } else if (!stream->paused) {
+ /* If the buffers match and the stream is not paused,
+ * just send the put command; no need to do anything else.
+ */
+ ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+ if (ret == 0)
+ stream->saved = *arg;
+ else
+ BUG_ON(!interruptible);
+
+ return ret;
+ }
+
+ /* We don't start the old stream if we are interrupted.
+ * Might return -ENOMEM if it can't fit the buffer in vram.
+ */
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
+ if (ret)
+ return ret;
+
+ ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+ if (ret) {
+ /* This one needs to happen no matter what. We only remove
+ * the NO_EVICT flag so this is safe from -ENOMEM.
+ */
+ BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
+ return ret;
+ }
+
+ if (stream->buf != buf)
+ stream->buf = vmw_dmabuf_reference(buf);
+ stream->saved = *arg;
+
+ return 0;
+}
+
+/**
+ * Stop all streams.
+ *
+ * Used by the fb code when starting.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_stop_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ struct vmw_stream *stream = &overlay->stream[i];
+ if (!stream->buf)
+ continue;
+
+ ret = vmw_overlay_stop(dev_priv, i, false, false);
+ WARN_ON(ret != 0);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+/**
+ * Try to resume all paused streams.
+ *
+ * Used by the kms code after moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_resume_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ struct vmw_stream *stream = &overlay->stream[i];
+ if (!stream->paused)
+ continue;
+
+ ret = vmw_overlay_update_stream(dev_priv, stream->buf,
+ &stream->saved, false);
+ if (ret != 0)
+ DRM_INFO("%s: *warning* failed to resume stream %i\n",
+ __func__, i);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+/**
+ * Pauses all active streams.
+ *
+ * Used by the kms code when moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_pause_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ if (overlay->stream[i].paused)
+ DRM_INFO("%s: *warning* stream %i already paused\n",
+ __func__, i);
+ ret = vmw_overlay_stop(dev_priv, i, true, false);
+ WARN_ON(ret != 0);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct drm_vmw_control_stream_arg *arg =
+ (struct drm_vmw_control_stream_arg *)data;
+ struct vmw_dma_buffer *buf;
+ struct vmw_resource *res;
+ int ret;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
+ if (ret)
+ return ret;
+
+ mutex_lock(&overlay->mutex);
+
+ if (!arg->enabled) {
+ ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
+ goto out_unlock;
+ }
+
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+ if (ret)
+ goto out_unlock;
+
+ ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+
+ vmw_dmabuf_unreference(&buf);
+
+out_unlock:
+ mutex_unlock(&overlay->mutex);
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
+{
+ if (!dev_priv->overlay_priv)
+ return 0;
+
+ return VMW_MAX_NUM_STREAMS;
+}
+
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, k;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
+ if (!overlay->stream[i].claimed)
+ k++;
+
+ mutex_unlock(&overlay->mutex);
+
+ return k;
+}
+
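+/*
+ * Claim the first unclaimed stream and return its id in @out, or
+ * -ESRCH when every stream is already claimed.
+ */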
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+
+ if (overlay->stream[i].claimed)
+ continue;
+
+ overlay->stream[i].claimed = true;
+ *out = i;
+ mutex_unlock(&overlay->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&overlay->mutex);
+ return -ESRCH;
+}
+
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+
+ BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
+
+ if (!overlay)
+ return -ENOSYS;
+
+ mutex_lock(&overlay->mutex);
+
+ WARN_ON(!overlay->stream[stream_id].claimed);
+ vmw_overlay_stop(dev_priv, stream_id, false, false);
+ overlay->stream[stream_id].claimed = false;
+
+ mutex_unlock(&overlay->mutex);
+ return 0;
+}
+
+int vmw_overlay_init(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay;
+ int i;
+
+ if (dev_priv->overlay_priv)
+ return -EINVAL;
+
+ /* Overlays are driven with escape video commands, so both caps are needed */
+ if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
+ !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
+ DRM_INFO("hardware doesn't support overlays\n");
+ return -ENOSYS;
+ }
+
+ overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return -ENOMEM;
+
+ memset(overlay, 0, sizeof(*overlay));
+ mutex_init(&overlay->mutex);
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ overlay->stream[i].buf = NULL;
+ overlay->stream[i].paused = false;
+ overlay->stream[i].claimed = false;
+ }
+
+ dev_priv->overlay_priv = overlay;
+
+ return 0;
+}
+
+int vmw_overlay_close(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ bool forgotten_buffer = false;
+ int i;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ if (overlay->stream[i].buf) {
+ forgotten_buffer = true;
+ vmw_overlay_stop(dev_priv, i, false, false);
+ }
+ }
+
+ WARN_ON(forgotten_buffer);
+
+ dev_priv->overlay_priv = NULL;
+ kfree(overlay);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 00000000000..9d0dd3a342e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * This file contains virtual hardware defines for kernel space.
+ */
+
+#ifndef _VMWGFX_REG_H_
+#define _VMWGFX_REG_H_
+
+#include <linux/types.h>
+
+#define VMWGFX_INDEX_PORT 0x0
+#define VMWGFX_VALUE_PORT 0x1
+#define VMWGFX_IRQSTATUS_PORT 0x8
+
+struct svga_guest_mem_descriptor {
+ __le32 ppn;
+ __le32 num_pages;
+};
+
+struct svga_fifo_cmd_fence {
+ __le32 fence;
+};
+
+#define SVGA_SYNC_GENERIC 1
+#define SVGA_SYNC_FIFOFULL 2
+
+#include "svga_types.h"
+
+#include "svga3d_reg.h"
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 00000000000..f8fbbc67a40
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1187 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_drm.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_placement.h"
+#include "drmP.h"
+
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+};
+
+struct vmw_user_dma_buffer {
+ struct ttm_base_object base;
+ struct vmw_dma_buffer dma;
+};
+
+struct vmw_bo_user_rep {
+ uint32_t handle;
+ uint64_t map_handle;
+};
+
+struct vmw_stream {
+ struct vmw_resource res;
+ uint32_t stream_id;
+};
+
+struct vmw_user_stream {
+ struct ttm_base_object base;
+ struct vmw_stream stream;
+};
+
+static inline struct vmw_dma_buffer *
+vmw_dma_buffer(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_dma_buffer, base);
+}
+
+static inline struct vmw_user_dma_buffer *
+vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+}
+
+struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
+{
+ kref_get(&res->kref);
+ return res;
+}
+
+static void vmw_resource_release(struct kref *kref)
+{
+ struct vmw_resource *res =
+ container_of(kref, struct vmw_resource, kref);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ idr_remove(res->idr, res->id);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (likely(res->hw_destroy != NULL))
+ res->hw_destroy(res);
+
+ if (res->res_free != NULL)
+ res->res_free(res);
+ else
+ kfree(res);
+
+ write_lock(&dev_priv->resource_lock);
+}
+
+void vmw_resource_unreference(struct vmw_resource **p_res)
+{
+ struct vmw_resource *res = *p_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ *p_res = NULL;
+ write_lock(&dev_priv->resource_lock);
+ kref_put(&res->kref, vmw_resource_release);
+ write_unlock(&dev_priv->resource_lock);
+}
+
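+/*
+ * Initialize a resource and install it in the given idr. The id is
+ * allocated under the resource lock, and the resource starts out with
+ * avail == false so lookups cannot see it until it has been activated.
+ */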
+static int vmw_resource_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ struct idr *idr,
+ enum ttm_object_type obj_type,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ kref_init(&res->kref);
+ res->hw_destroy = NULL;
+ res->res_free = res_free;
+ res->res_type = obj_type;
+ res->idr = idr;
+ res->avail = false;
+ res->dev_priv = dev_priv;
+
+ do {
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ write_lock(&dev_priv->resource_lock);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
+ write_unlock(&dev_priv->resource_lock);
+
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_activate
+ *
+ * @res: Pointer to the newly created resource
+ * @hw_destroy: Destroy function. NULL if none.
+ *
+ * Activate a resource after the hardware has been made aware of it.
+ * Set the destroy function to @hw_destroy. Typically this frees the
+ * resource and destroys the hardware resources associated with it.
+ * Activate basically means that the function vmw_resource_lookup will
+ * find it.
+ */
+
+static void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ write_lock(&dev_priv->resource_lock);
+ res->avail = true;
+ res->hw_destroy = hw_destroy;
+ write_unlock(&dev_priv->resource_lock);
+}
+
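+/*
+ * Look up a resource by id. Only resources marked available are
+ * returned, and a reference is taken under the read lock before the
+ * pointer is handed to the caller.
+ */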
+struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+ struct idr *idr, int id)
+{
+ struct vmw_resource *res;
+
+ read_lock(&dev_priv->resource_lock);
+ res = idr_find(idr, id);
+ if (res && res->avail)
+ kref_get(&res->kref);
+ else
+ res = NULL;
+ read_unlock(&dev_priv->resource_lock);
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+}
+
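+/*
+ * Register the resource in the context idr, make the hardware aware
+ * of it with an SVGA_3D_CMD_CONTEXT_DEFINE command, and activate it
+ * with vmw_hw_context_destroy as its hardware destructor.
+ */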
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
+ VMW_RES_CONTEXT, res_free);
+
+ if (unlikely(ret != 0)) {
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+
+ kfree(ctx);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_resource *res;
+ struct vmw_user_context *ctx;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = 0;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_context_free) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx = container_of(res, struct vmw_user_context, res);
+ if (ctx->base.tfile != tfile && !ctx->base.shareable) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
+out:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if (unlikely(ctx == NULL))
+ return -ENOMEM;
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = res->id;
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id)
+{
+ struct vmw_resource *res;
+ int ret = 0;
+
+ read_lock(&dev_priv->resource_lock);
+ res = idr_find(&dev_priv->context_idr, id);
+ if (res && res->avail) {
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ if (ctx->base.tfile != tfile && !ctx->base.shareable)
+ ret = -EPERM;
+ } else
+ ret = -EINVAL;
+ read_unlock(&dev_priv->resource_lock);
+
+ return ret;
+}
+
+
+/**
+ * Surface management.
+ */
+
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.sid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+}
+
+void vmw_surface_res_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ kfree(srf);
+}
+
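+/*
+ * Define the surface to the hardware: one SVGA_3D_CMD_SURFACE_DEFINE
+ * command followed by an SVGA3dSize per mip level across all faces,
+ * which is why the command length is computed separately from the
+ * fifo reservation size.
+ */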
+int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+ } *cmd;
+ SVGA3dSize *cmd_size;
+ struct vmw_resource *res = &srf->res;
+ struct drm_vmw_size *src_size;
+ size_t submit_size;
+ uint32_t cmd_len;
+ int i;
+
+ BUG_ON(res_free == NULL);
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+ VMW_RES_SURFACE, res_free);
+
+ if (unlikely(ret != 0)) {
+ res_free(res);
+ return ret;
+ }
+
+ submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed for create surface.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
+ cmd->header.size = cpu_to_le32(cmd_len);
+ cmd->body.sid = cpu_to_le32(res->id);
+ cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ cmd->body.face[i].numMipLevels =
+ cpu_to_le32(srf->mip_levels[i]);
+ }
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = cpu_to_le32(src_size->width);
+ cmd_size->height = cpu_to_le32(src_size->height);
+ cmd_size->depth = cpu_to_le32(src_size->depth);
+ }
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return 0;
+}
+
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ kfree(user_srf);
+}
+
+int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_surface **out)
+{
+ struct vmw_resource *res;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ read_lock(&dev_priv->resource_lock);
+
+ if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ read_unlock(&dev_priv->resource_lock);
+ goto out_bad_resource;
+ }
+
+ kref_get(&res->kref);
+ read_unlock(&dev_priv->resource_lock);
+
+ *out = srf;
+ ret = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
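+/*
+ * vmw_surface_define_ioctl - create a surface from user-space
+ * parameters. A single-size 64x64 A8R8G8B8 scanout surface
+ * additionally gets a snooper image; the intent (see the snooper
+ * fields) is that the cursor code can capture cursor contents the
+ * guest writes into such a surface.
+ */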
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf =
+ kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i;
+
+ if (unlikely(user_srf == NULL))
+ return -ENOMEM;
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ srf->scanout = req->scanout;
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ srf->num_sizes += srf->mip_levels[i];
+
+ if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS) {
+ ret = -EINVAL;
+ goto out_err0;
+ }
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+	ret = copy_from_user(srf->sizes, user_sizes,
+			     srf->num_sizes * sizeof(*srf->sizes));
+	if (unlikely(ret != 0)) {
+		/* copy_from_user() returns the number of bytes not
+		 * copied, not an errno; hand -EFAULT to user space. */
+		ret = -EFAULT;
+		goto out_err1;
+	}
+
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+		/* kzalloc() hands back an already-cleared cursor image. */
+		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+		if (srf->snooper.image == NULL) {
+			DRM_ERROR("Failed to allocate cursor_image\n");
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+	/*
+	 * From this point on, the generic resource management functions
+	 * destroy the object on failure, so the error paths below must
+	 * not free user_srf themselves.
+	 */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ return ret;
+ }
+
+ rep->sid = user_srf->base.hash.key;
+ if (rep->sid == SVGA3D_INVALID_ID)
+ DRM_ERROR("Created bad Surface ID.\n");
+
+ vmw_resource_unreference(&res);
+ return 0;
+out_err1:
+ kfree(srf->sizes);
+out_err0:
+ kfree(user_srf);
+ return ret;
+}
+
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+	if (user_sizes)
+		ret = copy_to_user(user_sizes, srf->sizes,
+				   srf->num_sizes * sizeof(*srf->sizes));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("copy_to_user failed %p %u\n",
+			  user_sizes, srf->num_sizes);
+		/* copy_to_user() returns bytes not copied, not an errno. */
+		ret = -EFAULT;
+	}
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id)
+{
+ struct ttm_base_object *base;
+ struct vmw_user_surface *user_srf;
+
+ int ret = -EPERM;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_surface;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ *id = user_srf->srf.res.id;
+ ret = 0;
+
+out_bad_surface:
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+
+ ttm_base_object_unref(&base);
+ return ret;
+}
+
+/**
+ * Buffer management.
+ */
+
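+/*
+ * vmw_dmabuf_acc_size - compute the TTM accounting size of a buffer
+ * object: a cached fixed per-bo overhead plus the page-aligned size of
+ * the bo's page array. This is the amount charged to the global memory
+ * accounting in vmw_dmabuf_init() and released again by the bo
+ * destructors.
+ */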
+static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
+ unsigned long num_pages)
+{
+ static size_t bo_user_size = ~0;
+
+ size_t page_array_size =
+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+
+ if (unlikely(bo_user_size == ~0)) {
+ bo_user_size = glob->ttm_bo_extra_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ }
+
+ return bo_user_size + page_array_size;
+}
+
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+ struct vmw_private *dev_priv =
+ container_of(bo->bdev, struct vmw_private, bdev);
+
+ if (vmw_bo->gmr_bound) {
+ vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
+ spin_unlock(&glob->lru_lock);
+ vmw_bo->gmr_bound = false;
+ }
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+
+ vmw_dmabuf_gmr_unbind(bo);
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ kfree(vmw_bo);
+}
+
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free) (struct ttm_buffer_object *bo))
+{
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ size_t acc_size;
+ int ret;
+
+ BUG_ON(!bo_free);
+
+ acc_size =
+ vmw_dmabuf_acc_size(bdev->glob,
+ (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (unlikely(ret != 0)) {
+		/* Free the bo here ourselves; on failure ttm_bo_init()
+		 * below likewise calls the destroy callback instead of
+		 * leaving cleanup to the caller. */
+ bo_free(&vmw_bo->base);
+ return ret;
+ }
+
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+ INIT_LIST_HEAD(&vmw_bo->gmr_lru);
+ INIT_LIST_HEAD(&vmw_bo->validate_list);
+ vmw_bo->gmr_id = 0;
+ vmw_bo->gmr_bound = false;
+
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, 0, interruptible,
+ NULL, acc_size, bo_free);
+ return ret;
+}
+
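+/*
+ * User bo teardown happens in two stages: vmw_user_dmabuf_release()
+ * runs when the last base-object reference goes away and drops the bo
+ * reference it holds, while vmw_user_dmabuf_destroy() runs when the bo
+ * refcount itself reaches zero and returns the accounted memory.
+ */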
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+
+ vmw_dmabuf_gmr_unbind(bo);
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ kfree(vmw_user_bo);
+}
+
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_base_object *base = *p_base;
+ struct ttm_buffer_object *bo;
+
+ *p_base = NULL;
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ bo = &vmw_user_bo->dma.base;
+ ttm_bo_unref(&bo);
+}
+
+int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_buffer_object *tmp;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+ if (unlikely(vmw_user_bo == NULL))
+ return -ENOMEM;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0)) {
+ kfree(vmw_user_bo);
+ return ret;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+	if (unlikely(ret != 0)) {
+		/* vmw_dmabuf_init() has already called bo_free()
+		 * on failure, so only the lock needs releasing. */
+		ttm_read_unlock(&vmaster->lock);
+		return ret;
+	}
+
+ tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+ ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+ &vmw_user_bo->base,
+ false,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+	if (unlikely(ret != 0)) {
+		/* The base object never took its bo reference; drop it
+		 * by hand besides the local one. tmp is NULL after the
+		 * first unref, so re-point it for the second. */
+		ttm_bo_unref(&tmp);
+		tmp = &vmw_user_bo->dma.base;
+		ttm_bo_unref(&tmp);
+		ttm_read_unlock(&vmaster->lock);
+		return ret;
+	}
+
+	rep->handle = vmw_user_bo->base.hash.key;
+	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
+	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
+	rep->cur_gmr_offset = 0;
+	ttm_bo_unref(&tmp);
+
+ ttm_read_unlock(&vmaster->lock);
+
+ return 0;
+}
+
+int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_unref_dmabuf_arg *arg =
+ (struct drm_vmw_unref_dmabuf_arg *)data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ uint32_t cur_validate_node)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+ if (likely(vmw_bo->on_validate_list))
+ return vmw_bo->cur_validate_node;
+
+ vmw_bo->cur_validate_node = cur_validate_node;
+ vmw_bo->on_validate_list = true;
+
+ return cur_validate_node;
+}
+
+void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+ vmw_bo->on_validate_list = false;
+}
+
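+/*
+ * The two helpers above implement a "first caller assigns the slot"
+ * scheme for building a submission's validation list: the first
+ * vmw_dmabuf_validate_node() call on a bo records cur_validate_node,
+ * repeat calls return the same slot, and vmw_dmabuf_validate_clear()
+ * resets the bo once validation is done.
+ */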
+uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo;
+
+ if (bo->mem.mem_type == TTM_PL_VRAM)
+ return SVGA_GMR_FRAMEBUFFER;
+
+ vmw_bo = vmw_dma_buffer(bo);
+
+ return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
+}
+
+void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ vmw_bo->gmr_bound = true;
+ vmw_bo->gmr_id = id;
+}
+
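+/*
+ * Sketch of the intended GMR usage (the actual call sites live in the
+ * command submission code, not here): allocate an id with
+ * vmw_gmr_id_alloc(), bind the bo's pages to it, then record the
+ * binding with vmw_dmabuf_set_gmr(); vmw_dmabuf_gmr() above then
+ * reports that id, SVGA_GMR_FRAMEBUFFER for VRAM, or SVGA_GMR_NULL.
+ */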
+int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_dma_buffer **out)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -ESRCH;
+ }
+
+ if (unlikely(base->object_type != ttm_buffer_type)) {
+ ttm_base_object_unref(&base);
+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -EINVAL;
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ (void)ttm_bo_reference(&vmw_user_bo->dma.base);
+ ttm_base_object_unref(&base);
+ *out = &vmw_user_bo->dma;
+
+ return 0;
+}
+
+/**
+ * TODO: Implement a gmr id eviction mechanism. Currently we just fail
+ * when we're out of ids, causing GMR space to be allocated
+ * out of VRAM.
+ */
+
+int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
+{
+ struct ttm_bo_global *glob = dev_priv->bdev.glob;
+ int id;
+ int ret;
+
+ do {
+ if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ spin_lock(&glob->lru_lock);
+ ret = ida_get_new(&dev_priv->gmr_ida, &id);
+ spin_unlock(&glob->lru_lock);
+ } while (ret == -EAGAIN);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(id >= dev_priv->max_gmr_ids)) {
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, id);
+ spin_unlock(&glob->lru_lock);
+ return -EBUSY;
+ }
+
+ *p_id = (uint32_t) id;
+ return 0;
+}
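+
+/*
+ * The loop above is the stock ida_pre_get()/ida_get_new() retry
+ * pattern: ida_get_new() may return -EAGAIN when the preallocated
+ * layer was consumed by a concurrent allocator, in which case we
+ * preload and try again. glob->lru_lock doubles as the ida lock.
+ */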
+
+/**
+ * Stream management.
+ */
+
+static void vmw_stream_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_stream *stream;
+ int ret;
+
+ DRM_INFO("%s: unref\n", __func__);
+ stream = container_of(res, struct vmw_stream, res);
+
+ ret = vmw_overlay_unref(dev_priv, stream->stream_id);
+ WARN_ON(ret != 0);
+}
+
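+/*
+ * vmw_stream_init - wrap an overlay stream in a vmw_resource. Should
+ * vmw_resource_init() fail, the object is freed directly (kfree() or
+ * the caller's res_free), because the hardware destructor is only
+ * armed by vmw_resource_activate() at the end.
+ */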
+static int vmw_stream_init(struct vmw_private *dev_priv,
+ struct vmw_stream *stream,
+ void (*res_free) (struct vmw_resource *res))
+{
+ struct vmw_resource *res = &stream->res;
+ int ret;
+
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
+ VMW_RES_STREAM, res_free);
+
+ if (unlikely(ret != 0)) {
+ if (res_free == NULL)
+ kfree(stream);
+ else
+ res_free(&stream->res);
+ return ret;
+ }
+
+ ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
+ if (ret) {
+ vmw_resource_unreference(&res);
+ return ret;
+ }
+
+ DRM_INFO("%s: claimed\n", __func__);
+
+ vmw_resource_activate(&stream->res, vmw_stream_destroy);
+ return 0;
+}
+
+/**
+ * User-space stream management.
+ */
+
+static void vmw_user_stream_free(struct vmw_resource *res)
+{
+ struct vmw_user_stream *stream =
+ container_of(res, struct vmw_user_stream, stream.res);
+
+ kfree(stream);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_stream *stream =
+ container_of(base, struct vmw_user_stream, base);
+ struct vmw_resource *res = &stream->stream.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_resource *res;
+ struct vmw_user_stream *stream;
+ struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = 0;
+
+	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
+				  arg->stream_id);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_stream_free) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stream = container_of(res, struct vmw_user_stream, stream.res);
+ if (stream->base.tfile != tfile) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
+out:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if (unlikely(stream == NULL))
+ return -ENOMEM;
+
+ res = &stream->stream.res;
+ stream->base.shareable = false;
+ stream->base.tfile = NULL;
+
+ ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
+ &vmw_user_stream_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->stream_id = res->id;
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t *inout_id, struct vmw_resource **out)
+{
+ struct vmw_user_stream *stream;
+ struct vmw_resource *res;
+ int ret;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_stream_free) {
+ ret = -EINVAL;
+ goto err_ref;
+ }
+
+ stream = container_of(res, struct vmw_user_stream, stream.res);
+ if (stream->base.tfile != tfile) {
+ ret = -EPERM;
+ goto err_ref;
+ }
+
+ *inout_id = stream->stream.stream_id;
+ *out = res;
+ return 0;
+err_ref:
+ vmw_resource_unreference(&res);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 00000000000..e3df4adfb4d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
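+/*
+ * Offsets below VMWGFX_FILE_PAGE_OFFSET are legacy DRM or FIFO
+ * mappings and are routed to vmw_fifo_mmap() or drm_mmap(); anything
+ * at or above it is a TTM buffer-object mapping handled by
+ * ttm_bo_mmap().
+ */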
+int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
+ if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
+ if (vmw_fifo_mmap(filp, vma) == 0)
+ return 0;
+ return drm_mmap(filp, vma);
+ }
+
+ file_priv = (struct drm_file *)filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+ return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+}
+
+static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+ DRM_INFO("global init.\n");
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+int vmw_ttm_global_init(struct vmw_private *dev_priv)
+{
+ struct ttm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &dev_priv->mem_global_ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vmw_ttm_mem_global_init;
+ global_ref->release = &vmw_ttm_mem_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM memory accounting.\n");
+ return ret;
+ }
+
+ dev_priv->bo_global_ref.mem_glob =
+ dev_priv->mem_global_ref.object;
+ global_ref = &dev_priv->bo_global_ref.ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ ret = ttm_global_item_ref(global_ref);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM buffer objects.\n");
+ goto out_no_bo;
+ }
+
+ return 0;
+out_no_bo:
+ ttm_global_item_unref(&dev_priv->mem_global_ref);
+ return ret;
+}
+
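+/*
+ * Teardown must unref in the reverse order of vmw_ttm_global_init():
+ * the bo global holds a reference on the memory global.
+ */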
+void vmw_ttm_global_release(struct vmw_private *dev_priv)
+{
+ ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
+ ttm_global_item_unref(&dev_priv->mem_global_ref);
+}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c..2f6cf69ecb3 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
remaining -= 7;
pr_devel("client 0x%p called 'target'\n", priv);
/* if target is default */
- if (!strncmp(buf, "default", 7))
+ if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
if (!vga_pci_str_to_vars(curr_pos, remaining,