author    Linus Torvalds <torvalds@linux-foundation.org>    2012-07-26 14:18:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-07-26 14:18:18 -0700
commit    bd22dc17e49973d3d4925970260e9e37f7580a9f (patch)
tree      581a7c7527f628aa91eb2e0680b765a9673bc974
parent    548ed10228093f1036297a333d1c1064f4daefdc (diff)
parent    98c7b42375011ec37251e6fc85a0471cfe499eea (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "One of the smaller drm -next pulls in ages!

  Ben (nouveau) has a rewrite in progress but we decided to leave it
  stew for another cycle, so just some fixes from him.

   - radeon: lots of documentation work, fixes, more ring and locking
     changes, pcie gen2, more dp fixes.
   - i915: haswell features, gpu reset fixes, /dev/agpgart removal on
     machines that we never used it on, more VGA/HDP fixes, more DP
     fixes
   - drm core: cleanups from Daniel, sis 64-bit fixes, range allocator
     colouring.

  but yeah fairly quiet merge this time, probably because I missed half
  of it!"

Trivial add-add conflict in include/linux/pci_regs.h

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (255 commits)
  drm/nouveau: init vblank requests list
  drm/nv50: extend vblank semaphore to generic dmaobj + offset pair
  drm/nouveau: mark most of our ioctls as deprecated, move to compat layer
  drm/nouveau: move current gpuobj code out of nouveau_object.c
  drm/nouveau/gem: fix object reference leak in a failure path
  drm/nv50: rename INVALID_QUERY_OR_TEXTURE error to INVALID_OPERATION
  drm/nv84: decode PCRYPT errors
  drm/nouveau: dcb table quirk for fdo#50830
  nouveau: Fix alignment requirements on src and dst addresses
  drm/i915: unbreak lastclose for failed driver init
  drm/i915: Set the context before setting up regs for the context.
  drm/i915: constify mode in crtc_mode_fixup
  drm/i915/lvds: ditch ->prepare special case
  drm/i915: dereferencing an error pointer
  drm/i915: fix invalid reference handling of the default ctx obj
  drm/i915: Add -EIO to the list of known errors for __wait_seqno
  drm/i915: Flush the context object from the CPU caches upon switching
  drm/radeon: fix dpms on/off on trinity/aruba v2
  drm/radeon: on hotplug force link training to happen (v2)
  drm/radeon: fix hotplug of DP to DVI|HDMI passive adapters (v2)
  ...
-rw-r--r--  drivers/char/agp/intel-agp.c | 16
-rw-r--r--  drivers/char/agp/intel-agp.h | 3
-rw-r--r--  drivers/char/agp/intel-gtt.c | 91
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 16
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 5
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 10
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 78
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 38
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 169
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 49
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 6
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_mode.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_priv.h | 2
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 17
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 68
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 276
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 351
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 536
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 270
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 191
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 127
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 92
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 498
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 375
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 483
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 92
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 13
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 245
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpuobj.c (renamed from drivers/gpu/drm/nouveau/nouveau_object.c) | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_software.h | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 74
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_software.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.fuc.h | 94
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_copy.fuc.h | 87
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 8
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 270
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 1191
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 193
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 153
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 384
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 398
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 393
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 283
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 138
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 410
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 84
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 71
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 95
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 176
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 1
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 9
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 19
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 46
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 6
-rw-r--r--  drivers/staging/omapdrm/omap_crtc.c | 2
-rw-r--r--  drivers/staging/omapdrm/omap_encoder.c | 2
-rw-r--r--  include/drm/drmP.h | 19
-rw-r--r--  include/drm/drm_crtc.h | 2
-rw-r--r--  include/drm/drm_crtc_helper.h | 4
-rw-r--r--  include/drm/drm_encoder_slave.h | 2
-rw-r--r--  include/drm/drm_mm.h | 93
-rw-r--r--  include/drm/drm_pciids.h | 42
-rw-r--r--  include/drm/i915_drm.h | 34
-rw-r--r--  include/drm/intel-gtt.h | 8
-rw-r--r--  include/drm/nouveau_drm.h | 94
-rw-r--r--  include/drm/sis_drm.h | 8
-rw-r--r--  include/linux/pci_regs.h | 5
189 files changed, 7662 insertions, 3460 deletions
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 0a418527941..b130df0a195 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
+#include <drm/intel-gtt.h>
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
@@ -747,7 +748,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
bridge->capndx = cap_ptr;
- if (intel_gmch_probe(pdev, bridge))
+ if (intel_gmch_probe(pdev, NULL, bridge))
goto found_gmch;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -824,7 +825,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
agp_remove_bridge(bridge);
- intel_gmch_remove(pdev);
+ intel_gmch_remove();
agp_put_bridge(bridge);
}
@@ -902,17 +903,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
- ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
- ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
- ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
- ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 8e2d9140f30..57226424690 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -251,7 +251,4 @@
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
-int intel_gmch_probe(struct pci_dev *pdev,
- struct agp_bridge_data *bridge);
-void intel_gmch_remove(struct pci_dev *pdev);
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1237e7575c3..9ed92ef5829 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -66,7 +66,6 @@ static struct _intel_private {
struct pci_dev *bridge_dev;
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
- phys_addr_t gma_bus_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
@@ -76,6 +75,7 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
+ int refcount;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -648,6 +648,7 @@ static void intel_gtt_cleanup(void)
static int intel_gtt_init(void)
{
+ u32 gma_addr;
u32 gtt_map_size;
int ret;
@@ -694,6 +695,15 @@ static int intel_gtt_init(void)
return ret;
}
+ if (INTEL_GTT_GEN <= 2)
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+ &gma_addr);
+ else
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+ &gma_addr);
+
+ intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+
return 0;
}
@@ -767,20 +777,10 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool intel_enable_gtt(void)
+bool intel_enable_gtt(void)
{
- u32 gma_addr;
u8 __iomem *reg;
- if (INTEL_GTT_GEN <= 2)
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
- &gma_addr);
- else
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
- &gma_addr);
-
- intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
-
if (INTEL_GTT_GEN >= 6)
return true;
@@ -823,6 +823,7 @@ static bool intel_enable_gtt(void)
return true;
}
+EXPORT_SYMBOL(intel_enable_gtt);
static int i830_setup(void)
{
@@ -860,7 +861,7 @@ static int intel_fake_agp_configure(void)
return -EIO;
intel_private.clear_fake_agp = true;
- agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+ agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
return 0;
}
@@ -1182,9 +1183,17 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else {
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
@@ -1244,6 +1253,7 @@ static int i9xx_setup(void)
switch (INTEL_GTT_GEN) {
case 5:
case 6:
+ case 7:
gtt_offset = MB(2);
break;
case 4:
@@ -1379,7 +1389,6 @@ static const struct intel_gtt_driver valleyview_gtt_driver = {
.write_entry = valleyview_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
@@ -1523,14 +1532,32 @@ static int find_gmch(u16 device)
return 1;
}
-int intel_gmch_probe(struct pci_dev *pdev,
- struct agp_bridge_data *bridge)
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge)
{
int i, mask;
- intel_private.driver = NULL;
+
+ /*
+ * Can be called from the fake agp driver but also directly from
+ * drm/i915.ko. Hence we need to check whether everything is set up
+ * already.
+ */
+ if (intel_private.driver) {
+ intel_private.refcount++;
+ return 1;
+ }
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
- if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+ if (gpu_pdev) {
+ if (gpu_pdev->device ==
+ intel_gtt_chipsets[i].gmch_chip_id) {
+ intel_private.pcidev = pci_dev_get(gpu_pdev);
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+
+ break;
+ }
+ } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
intel_private.driver =
intel_gtt_chipsets[i].gtt_driver;
break;
@@ -1540,13 +1567,17 @@ int intel_gmch_probe(struct pci_dev *pdev,
if (!intel_private.driver)
return 0;
- bridge->driver = &intel_fake_agp_driver;
- bridge->dev_private_data = &intel_private;
- bridge->dev = pdev;
+ intel_private.refcount++;
+
+ if (bridge) {
+ bridge->driver = &intel_fake_agp_driver;
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = bridge_pdev;
+ }
- intel_private.bridge_dev = pci_dev_get(pdev);
+ intel_private.bridge_dev = pci_dev_get(bridge_pdev);
- dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+ dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
mask = intel_private.driver->dma_mask_size;
if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
@@ -1556,11 +1587,11 @@ int intel_gmch_probe(struct pci_dev *pdev,
pci_set_consistent_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(mask));
- /*if (bridge->driver == &intel_810_driver)
- return 1;*/
+ if (intel_gtt_init() != 0) {
+ intel_gmch_remove();
- if (intel_gtt_init() != 0)
return 0;
+ }
return 1;
}
@@ -1579,12 +1610,16 @@ void intel_gtt_chipset_flush(void)
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);
-void intel_gmch_remove(struct pci_dev *pdev)
+void intel_gmch_remove(void)
{
+ if (--intel_private.refcount)
+ return;
+
if (intel_private.pcidev)
pci_dev_put(intel_private.pcidev);
if (intel_private.bridge_dev)
pci_dev_put(intel_private.bridge_dev);
+ intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);
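[Editor's note] The intel-gtt.c hunks above let intel_gmch_probe() be called either from the fake-AGP driver (with an agp_bridge_data) or directly from drm/i915 (with the GPU's pci_dev and a NULL bridge), with a refcount so whichever side detaches last tears the driver down. A minimal sketch of a direct caller; my_gpu_gtt_init()/my_gpu_gtt_fini() are hypothetical helpers, while intel_gmch_probe(), intel_enable_gtt() and intel_gmch_remove() have the signatures introduced above:

#include <drm/intel-gtt.h>

/* Hypothetical drm/i915-style caller of the refactored GTT code. */
static int my_gpu_gtt_init(struct pci_dev *bridge_pdev,
			   struct pci_dev *gpu_pdev)
{
	/* NULL agp bridge: skip the fake-AGP setup and match the
	 * chipset table on gpu_pdev->device instead of find_gmch(). */
	if (!intel_gmch_probe(bridge_pdev, gpu_pdev, NULL))
		return -ENODEV;	/* not in intel_gtt_chipsets[] */

	/* GMADR is now cached by intel_gtt_init(), so enabling the
	 * GTT no longer touches PCI config space. */
	if (!intel_enable_gtt())
		return -EIO;

	return 0;
}

static void my_gpu_gtt_fini(void)
{
	intel_gmch_remove();	/* drops the refcount; last user cleans up */
}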
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 65f9d231af1..7282c081fb5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -460,8 +460,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
@@ -680,7 +680,7 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
}
static bool ast_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 100f6308c50..a44d31aa4e3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -97,7 +97,7 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
* to just pass that straight through, so this does nothing
*/
static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -429,8 +429,8 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
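[Editor's note] The ast and cirrus hunks above are two instances of a tree-wide change in this pull: mode_fixup() now receives the requested mode as const, and any adjustment must go through adjusted_mode (which the helpers duplicate from the requested mode before calling in). A sketch of a non-trivial callback under the new prototype; the doubling policy is invented purely for illustration:

static bool example_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* 'mode' is read-only now; writing through it no longer
	 * compiles. adjusted_mode is what the hardware will get. */
	if (mode->vdisplay < 480)
		adjusted_mode->clock = mode->clock * 2;	/* invented policy */

	return true;	/* returning false rejects the mode */
}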
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 348b367debe..b356c719f2f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -641,8 +641,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
@@ -704,7 +702,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -796,13 +793,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
order = drm_order(request->size);
size = 1 << order;
- DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
- request->count, request->size, size, order, dev->queue_count);
+ DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
+ request->count, request->size, size, order);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
@@ -904,7 +899,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1019,8 +1013,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
@@ -1071,7 +1063,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1177,8 +1168,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
@@ -1228,7 +1217,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1c7a1c0d3ed..70b13fc1939 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,7 +46,6 @@ static struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index cfb4e333ec0..08f5e5309b2 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -120,11 +120,6 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
- && waitqueue_active(&buf->dma_wait)) {
- wake_up_interruptible(&buf->dma_wait);
- }
}
/**
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8a9d0792e4e..9238de4009f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -182,7 +182,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
int drm_lastclose(struct drm_device * dev)
{
struct drm_vma_entry *vma, *vma_temp;
- int i;
DRM_DEBUG("\n");
@@ -228,16 +227,6 @@ int drm_lastclose(struct drm_device * dev)
kfree(vma);
}
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- kfree(dev->queuelist[i]);
- dev->queuelist[i] = NULL;
- }
- kfree(dev->queuelist);
- dev->queuelist = NULL;
- }
- dev->queue_count = 0;
-
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!drm_core_check_feature(dev, DRIVER_MODESET))
drm_dma_takedown(dev);
@@ -486,7 +475,7 @@ long drm_ioctl(struct file *filp,
kfree(kdata);
atomic_dec(&dev->ioctl_count);
if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
+ DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5683b7fdd74..f546d1e8af8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -228,7 +228,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
int i, ret;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- ret = drm_crtc_helper_set_config(mode_set);
+ ret = mode_set->crtc->funcs->set_config(mode_set);
if (ret)
error = true;
}
@@ -1353,7 +1353,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
- bool bound = false, crtcs_bound = false;
+ int bound = 0, crtcs_bound = 0;
struct drm_crtc *crtc;
if (!fb_helper->fb)
@@ -1362,12 +1362,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
- crtcs_bound = true;
+ crtcs_bound++;
if (crtc->fb == fb_helper->fb)
- bound = true;
+ bound++;
}
- if (!bound && crtcs_bound) {
+ if (bound < crtcs_bound) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&dev->mode_config.mutex);
return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 123de28f94e..5062eec673f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -75,10 +75,6 @@ static int drm_setup(struct drm_device * dev)
dev->sigdata.lock = NULL;
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
@@ -144,12 +140,12 @@ int drm_open(struct inode *inode, struct file *filp)
}
if (!retcode) {
mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
- else if (dev->dev_mapping != inode->i_mapping)
- retcode = -ENODEV;
- }
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = &inode->i_data;
+ /* ihold ensures nobody can remove inode with our i_data */
+ ihold(container_of(dev->dev_mapping, struct inode, i_data));
+ inode->i_mapping = dev->dev_mapping;
+ filp->f_mapping = dev->dev_mapping;
mutex_unlock(&dev->struct_mutex);
}
@@ -370,72 +366,16 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
- struct drm_file *file_priv = f->private_data;
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&file_priv->master->lock);
-
- /*
- * Wait for a while.
- */
- do {
- spin_lock_bh(&file_priv->master->lock.spinlock);
- locked = file_priv->master->lock.idle_has_lock;
- spin_unlock_bh(&file_priv->master->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-}
-
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- if (dev->driver->reclaim_buffers_locked &&
- file_priv->master->lock.hw_lock)
- drm_reclaim_locked_buffers(dev, filp);
-
- if (dev->driver->reclaim_buffers_idlelocked &&
- file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-
-
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
}
static void drm_events_release(struct drm_file *file_priv)
@@ -505,6 +445,9 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master)
drm_master_release(dev, filp);
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_core_reclaim_buffers(dev, file_priv);
+
drm_events_release(file_priv);
if (dev->driver->driver_features & DRIVER_MODESET)
@@ -566,6 +509,9 @@ int drm_release(struct inode *inode, struct file *filp)
}
}
+ BUG_ON(dev->dev_mapping == NULL);
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+
/* drop the reference held my the file priv */
drm_master_put(&file_priv->master);
file_priv->is_master = 0;
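[Editor's note] The drm_open()/drm_release() hunks above replace the old per-minor mapping check with a single shared dev_mapping pinned via ihold() and dropped via iput(). The trick, restated in isolation: an address_space carries no refcount of its own, but i_data is embedded in struct inode, so holding the inode keeps the mapping alive. A hedged sketch of the pattern (not additional driver code):

#include <linux/fs.h>

static struct address_space *pin_shared_mapping(struct inode *inode)
{
	ihold(inode);		/* inode (and its i_data) cannot be freed now */
	return &inode->i_data;
}

static void unpin_shared_mapping(struct address_space *mapping)
{
	/* Recover the owning inode the same way drm_release() does. */
	iput(container_of(mapping, struct inode, i_data));
}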
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d58e69da1fb..fbe0842038b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -354,7 +354,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
+ obj->size / PAGE_SIZE, 0, false);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index ab1162da70f..8928edbb94c 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -110,42 +110,6 @@ int drm_vm_info(struct seq_file *m, void *data)
}
/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int i;
- struct drm_queue *q;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, " ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- seq_printf(m, "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->write_queue) ? 'w' : '-',
- waitqueue_active(&q->flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
* Called when "/proc/dri/.../bufs" is read.
*/
int drm_bufs_info(struct seq_file *m, void *data)
@@ -235,7 +199,7 @@ int drm_clients_info(struct seq_file *m, void *data)
}
-int drm_gem_one_name_info(int id, void *ptr, void *data)
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c798eeae0a0..03f16f352fe 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -974,7 +974,6 @@ EXPORT_SYMBOL(drm_vblank_off);
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
- * @post: post or pre mode set?
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
@@ -1037,6 +1036,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
if (!dev->num_crtcs)
return 0;
+ /* KMS drivers handle this internally */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 52115204169..32039553e17 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -70,10 +70,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
lock->context, task_pid_nr(current),
master->lock.hw_lock->lock, lock->flags);
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock->context < 0)
- return -EINVAL;
-
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 961fb54f426..9bb82f7f006 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -118,45 +118,53 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
struct drm_mm *mm = hole_node->mm;
- unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (alignment)
- tmp = hole_start % alignment;
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
- if (!tmp) {
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
+
+ if (adj_start == hole_start) {
hole_node->hole_follows = 0;
- list_del_init(&hole_node->hole_stack);
- } else
- wasted = alignment - tmp;
+ list_del(&hole_node->hole_stack);
+ }
- node->start = hole_start + wasted;
+ node->start = adj_start;
node->size = size;
node->mm = mm;
+ node->color = color;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
- BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > adj_end);
+ node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
- } else {
- node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
int atomic)
{
struct drm_mm_node *node;
@@ -165,7 +173,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
if (unlikely(node == NULL))
return NULL;
- drm_mm_insert_helper(hole_node, node, size, alignment);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
return node;
}
@@ -181,11 +189,11 @@ int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ hole_node = drm_mm_search_free(mm, size, alignment, false);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment);
+ drm_mm_insert_helper(hole_node, node, size, alignment, 0);
return 0;
}
@@ -194,50 +202,57 @@ EXPORT_SYMBOL(drm_mm_insert_node);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
+ unsigned long color,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
- unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (hole_start < start)
- wasted += start - hole_start;
- if (alignment)
- tmp = (hole_start + wasted) % alignment;
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
- if (tmp)
- wasted += alignment - tmp;
+ if (adj_start < start)
+ adj_start = start;
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
- if (!wasted) {
+ if (adj_start == hole_start) {
hole_node->hole_follows = 0;
- list_del_init(&hole_node->hole_stack);
+ list_del(&hole_node->hole_stack);
}
- node->start = hole_start + wasted;
+ node->start = adj_start;
node->size = size;
node->mm = mm;
+ node->color = color;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
- BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
+ node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
- } else {
- node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
@@ -248,7 +263,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
if (unlikely(node == NULL))
return NULL;
- drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
return node;
@@ -267,11 +282,11 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
- start, end, 0);
+ start, end, false);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
start, end);
return 0;
@@ -336,27 +351,23 @@ EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
- unsigned wasted = 0;
-
if (end - start < size)
return 0;
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
- wasted = alignment - tmp;
- }
-
- if (end >= start + size + wasted) {
- return 1;
+ start += alignment - tmp;
}
- return 0;
+ return end >= start + size;
}
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match)
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -368,10 +379,17 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best_size = ~0UL;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry);
+
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
+
BUG_ON(!entry->hole_follows);
- if (!check_free_hole(drm_mm_hole_node_start(entry),
- drm_mm_hole_node_end(entry),
- size, alignment))
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
@@ -385,14 +403,15 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free);
-
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int best_match)
+EXPORT_SYMBOL(drm_mm_search_free_generic);
+
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -410,6 +429,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
end : drm_mm_hole_node_end(entry);
BUG_ON(!entry->hole_follows);
+
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
+
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -424,7 +450,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
+EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -437,6 +463,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
+ new->color = old->color;
old->allocated = 0;
new->allocated = 1;
@@ -452,9 +479,12 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
- unsigned alignment)
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color)
{
+ mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
@@ -474,11 +504,14 @@ EXPORT_SYMBOL(drm_mm_init_scan);
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end)
{
+ mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
@@ -522,17 +555,21 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
+
+ adj_start = hole_start;
+ adj_end = hole_end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+
if (mm->scan_check_range) {
- adj_start = hole_start < mm->scan_start ?
- mm->scan_start : hole_start;
- adj_end = hole_end > mm->scan_end ?
- mm->scan_end : hole_end;
- } else {
- adj_start = hole_start;
- adj_end = hole_end;
+ if (adj_start < mm->scan_start)
+ adj_start = mm->scan_start;
+ if (adj_end > mm->scan_end)
+ adj_end = mm->scan_end;
}
- if (check_free_hole(adj_start , adj_end,
+ if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
@@ -616,6 +653,8 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+ mm->color_adjust = NULL;
+
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
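[Editor's note] The drm_mm.c rework above threads an opaque "color" through allocation, search and eviction scanning, plus an optional mm->color_adjust hook that can shrink a candidate hole before it is checked. A sketch of how a driver might use it; the one-page guard policy in example_color_adjust() is invented, while the _generic entry points have the signatures exported above:

static void example_color_adjust(struct drm_mm_node *node,
				 unsigned long color,
				 unsigned long *start, unsigned long *end)
{
	/* 'node' is the allocation in front of the hole being probed;
	 * keep a one-page guard between differently coloured neighbours
	 * (invented policy). */
	if (node->allocated && node->color != color)
		*start += PAGE_SIZE;
}

static struct drm_mm_node *example_alloc(struct drm_mm *mm,
					 unsigned long size,
					 unsigned long color)
{
	struct drm_mm_node *hole;

	mm->color_adjust = example_color_adjust; /* once, after drm_mm_init() */

	hole = drm_mm_search_free_generic(mm, size, 0, color, false);
	if (!hole)
		return NULL;

	/* color_adjust is applied again while carving the node out. */
	return drm_mm_get_block_generic(hole, size, 0, color, 0);
}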
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 13f3d936472..5320364582c 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -465,3 +465,52 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
+{
+ struct pci_dev *root;
+ int pos;
+ u32 lnkcap, lnkcap2;
+
+ *mask = 0;
+ if (!dev->pdev)
+ return -EINVAL;
+
+ if (!pci_is_pcie(dev->pdev))
+ return -EINVAL;
+
+ root = dev->pdev->bus->self;
+
+ pos = pci_pcie_cap(root);
+ if (!pos)
+ return -EINVAL;
+
+ /* we've been informed via and serverworks don't make the cut */
+ if (root->vendor == PCI_VENDOR_ID_VIA ||
+ root->vendor == PCI_VENDOR_ID_SERVERWORKS)
+ return -EINVAL;
+
+ pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
+ pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
+
+ lnkcap &= PCI_EXP_LNKCAP_SLS;
+ lnkcap2 &= 0xfe;
+
+ if (lnkcap2) { /* PCIE GEN 3.0 */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *mask |= DRM_PCIE_SPEED_25;
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *mask |= DRM_PCIE_SPEED_50;
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *mask |= DRM_PCIE_SPEED_80;
+ } else {
+ if (lnkcap & 1)
+ *mask |= DRM_PCIE_SPEED_25;
+ if (lnkcap & 2)
+ *mask |= DRM_PCIE_SPEED_50;
+ }
+
+ DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
+ return 0;
+}
+EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
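[Editor's note] drm_pcie_get_speed_cap_mask() above likely serves the radeon "pcie gen2" work mentioned in the pull message. A hedged usage sketch; enable_pcie_gen2() is a hypothetical driver hook, while the helper and the DRM_PCIE_SPEED_* bits are as added in this series:

static void example_check_pcie_gen2(struct drm_device *ddev)
{
	u32 mask;

	/* Fails for non-PCIe devices and for VIA/ServerWorks roots. */
	if (drm_pcie_get_speed_cap_mask(ddev, &mask))
		return;

	if (mask & DRM_PCIE_SPEED_50)
		enable_pcie_gen2(ddev);	/* hypothetical driver hook */
}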
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index fff87221f9e..371c695322d 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -53,7 +53,6 @@ static struct drm_info_list drm_proc_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 45cf1dd3eb9..45ac8d6c92b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -134,6 +134,7 @@ void drm_sysfs_destroy(void)
return;
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
+ drm_class = NULL;
}
/**
@@ -554,6 +555,9 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
int drm_class_device_register(struct device *dev)
{
+ if (!drm_class || IS_ERR(drm_class))
+ return -ENOENT;
+
dev->class = drm_class;
return device_register(dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 4afb625128d..32a34c85899 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -237,7 +237,7 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
static bool
exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c82c90c443e..277653d5fda 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -174,7 +174,7 @@ struct exynos_drm_manager_ops {
void (*apply)(struct device *subdrv_dev);
void (*mode_fixup)(struct device *subdrv_dev,
struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 23d5ad379f8..4a13a747f5d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -108,7 +108,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
static bool
exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 5d9d2c2f8f3..8ffcdf8b9e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -142,7 +142,7 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index bd8126996e5..a91c42088e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -51,7 +51,7 @@ struct exynos_hdmi_ops {
/* manager */
void (*mode_fixup)(void *ctx, struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(void *ctx, void *mode);
void (*get_max_resol)(void *ctx, unsigned int *width,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a137e9e39a3..066bde3f19c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1940,7 +1940,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
}
static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_display_mode *m;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 18742201860..8c175345d85 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -82,7 +82,7 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
}
static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index c3e9a0f701d..a68509ba22a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -913,7 +913,7 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
}
static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 88b59d4a7b7..a86f87b9ddd 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -90,7 +90,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
}
static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ff5b58eb878..c7f9468b74b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -270,7 +270,7 @@ static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
}
static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 973d7f6d66b..8d7caf0f363 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -427,7 +427,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
*
* Returns 0 on success, nonzero on failure.
*/
-bool psb_intel_init_bios(struct drm_device *dev)
+int psb_intel_init_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 0a738663eb5..2e95523b84b 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -431,7 +431,7 @@ struct bdb_driver_features {
u8 custom_vbt_version;
} __attribute__((packed));
-extern bool psb_intel_init_bios(struct drm_device *dev);
+extern int psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
/*
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index b34ff097b97..d4813e03f5e 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -684,7 +684,7 @@ void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
}
bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
index 6f762478b95..2b40663e169 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
@@ -65,7 +65,7 @@ extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/* MDFLD DPI helper functions */
extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 3f3cd619c79..dec6a9aea3c 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -117,7 +117,7 @@ static void psb_intel_crtc_commit(struct drm_crtc *crtc)
}
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index f821c835ca9..cdafd2acc72 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -487,7 +487,7 @@ oaktrail_crtc_mode_set_exit:
}
static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index c10899c953b..2eb3dc4e9c9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -191,7 +191,7 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
}
static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index a8858a907f4..0c473743853 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -633,7 +633,6 @@ static struct drm_driver driver = {
.open = psb_driver_open,
.preclose = psb_driver_preclose,
.postclose = psb_driver_close,
- .reclaim_buffers = drm_core_reclaim_buffers,
.gem_init_object = psb_gem_init_object,
.gem_free_object = psb_gem_free_object,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 36c3c99612f..30dc22a7156 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -543,7 +543,7 @@ void psb_intel_encoder_destroy(struct drm_encoder *encoder)
}
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2515f83248c..ebe1a28f60e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -268,7 +268,7 @@ extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
*mode_cmd,
void *mm_private);
extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index c83f5b5d105..37adc9edf97 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -375,7 +375,7 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
}
bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index d39b15be764..0466c7b985f 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -901,7 +901,7 @@ static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
static bool
psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct psb_intel_sdvo_dtd output_dtd;
@@ -918,7 +918,7 @@ psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdv
static bool
psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* Reset the input timing to the screen. Assume always input 0. */
@@ -942,7 +942,7 @@ psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
}
static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d3f2e878501..36d952280c5 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -88,7 +88,7 @@ static void ch7006_encoder_restore(struct drm_encoder *encoder)
}
static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index c860f24a5af..9b83574141a 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -172,7 +172,7 @@ struct ch7006_mode ch7006_modes[] = {
};
struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
- struct drm_display_mode *drm_mode)
+ const struct drm_display_mode *drm_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_mode *mode;
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index 17667b7d57e..09599f4c0c9 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -111,7 +111,7 @@ extern struct ch7006_tv_norm_info ch7006_tv_norms[];
extern struct ch7006_mode ch7006_modes[];
struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
- struct drm_display_mode *drm_mode);
+ const struct drm_display_mode *drm_mode);
void ch7006_setup_levels(struct drm_encoder *encoder);
void ch7006_setup_subcarrier(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index b7d45ab4ba6..30b8ae5e5c4 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -254,7 +254,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
static bool
sil164_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index fa9439159eb..57d892eaaa6 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -881,7 +881,7 @@ static int i810_flush_queue(struct drm_device *dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -1220,12 +1220,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
}
-}
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- i810_reclaim_buffers(dev, file_priv);
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ i810_driver_reclaim_buffers(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ } else {
+ /* master disappeared, clean up stuff anyway and hope nothing
+ * goes wrong */
+ i810_driver_reclaim_buffers(dev, file_priv);
+ }
+
}
int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index ec12f7dc717..f9924ad04d0 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,13 +57,12 @@ static const struct file_operations i810_driver_fops = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+ DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.device_is_agp = i810_driver_device_is_agp,
- .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
.fops = &i810_driver_fops,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f48179..6e0acad9e0f 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
/* i810_dma.c */
extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
extern void i810_driver_lastclose(struct drm_device *dev);
extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2e9268da58d..b0bacdba6d7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,6 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
i915_suspend.o \
i915_gem.o \
+ i915_gem_context.o \
i915_gem_debug.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 8c2ad014c47..58914691a77 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -86,7 +86,7 @@ struct intel_dvo_dev_ops {
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5363e9c66c2..359f6e8b9b0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -676,6 +676,7 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@@ -713,6 +714,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "CCID: 0x%08x\n", error->ccid);
for (i = 0; i < dev_priv->num_fence_regs; i++)
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -1765,6 +1767,64 @@ static const struct file_operations i915_max_freq_fops = {
};
static ssize_t
+i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof(buf),
+ "min freq: %d\n", dev_priv->min_delay * 50);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+
+ /*
+ * Turbo will still be enabled, but won't go below the set value.
+ */
+ dev_priv->min_delay = val / 50;
+
+ gen6_set_rps(dev, val / 50);
+
+ return cnt;
+}
+
+static const struct file_operations i915_min_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_min_freq_read,
+ .write = i915_min_freq_write,
+ .llseek = default_llseek,
+};
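
A minimal userspace sketch of driving this new debugfs knob; the debugfs mount point and DRM minor number are assumptions, and the value written is in MHz (the driver divides by 50 to get RP frequency units):

/* Hypothetical sketch: pin the GPU's minimum frequency to 500 MHz via the
 * new i915_min_freq file. Assumes debugfs is mounted at /sys/kernel/debug
 * and the i915 device is DRM minor 0. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_min_freq";
	const char *val = "500";	/* MHz; the driver stores val / 50 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}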
+
+static ssize_t
i915_cache_sharing_read(struct file *filp,
char __user *ubuf,
size_t max,
@@ -1997,6 +2057,12 @@ int i915_debugfs_init(struct drm_minor *minor)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_min_freq",
+ &i915_min_freq_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_cache_sharing",
&i915_cache_sharing_fops);
if (ret)
@@ -2028,6 +2094,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
+ 1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 36822b924eb..9cf7dfe022b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1006,6 +1006,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1082,8 +1085,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
- 4096);
+ dev_priv->dri1.gfx_hws_cpu_addr =
+ ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
@@ -1411,7 +1414,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return;
- ap->ranges[0].base = dev_priv->dev->agp->base;
+ ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
ap->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
primary =
@@ -1467,11 +1470,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ ret = -EIO;
+ goto put_bridge;
+ }
+
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
- goto put_bridge;
+ goto put_gmch;
}
i915_kick_out_firmware_fb(dev_priv);
@@ -1498,19 +1508,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
- goto put_bridge;
+ goto put_gmch;
}
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base, aperture_size);
+ io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+ aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
- i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
+ i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+ aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -1534,7 +1547,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
intel_irq_init(dev);
+ intel_gt_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1567,7 +1584,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
@@ -1586,8 +1602,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
- intel_detect_pch(dev);
-
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
@@ -1622,13 +1636,16 @@ out_gem_unload:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
- mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
+ mtrr_del(dev_priv->mm.gtt_mtrr,
+ dev_priv->mm.gtt_base_addr,
+ aperture_size);
dev_priv->mm.gtt_mtrr = -1;
}
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
+put_gmch:
+ intel_gmch_remove();
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1660,8 +1677,9 @@ int i915_driver_unload(struct drm_device *dev)
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
- mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
+ mtrr_del(dev_priv->mm.gtt_mtrr,
+ dev_priv->mm.gtt_base_addr,
+ dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
dev_priv->mm.gtt_mtrr = -1;
}
@@ -1702,6 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
@@ -1741,6 +1760,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
+ idr_init(&file_priv->context_idr);
+
return 0;
}
@@ -1760,7 +1781,13 @@ void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* On gen6+ we refuse to init without kms enabled, but then the drm core
+ * goes right around and calls lastclose. Check for this and don't clean
+ * up anything. */
+ if (!dev_priv)
+ return;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fb_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
@@ -1773,6 +1800,7 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
}
@@ -1826,6 +1854,9 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9fe9ebe52a7..ed22612bc84 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/console.h>
@@ -215,7 +216,6 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
- .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -223,7 +223,6 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
- .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -232,7 +231,6 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -243,7 +241,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -253,7 +250,6 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -264,7 +260,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -292,7 +287,6 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -302,7 +296,6 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -358,6 +351,9 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
+ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
{0, 0, 0}
};
@@ -429,135 +425,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return 1;
}
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
- udelay(10);
-
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(FORCEWAKE);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
- udelay(10);
-}
-
-void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
- udelay(10);
-
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(FORCEWAKE_MT);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
- udelay(10);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->display.force_wake_get(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->display.force_wake_put(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->gt_fifo_count = fifo;
- }
- dev_priv->gt_fifo_count--;
-
- return ret;
-}
-
-void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
-
- /* Already awake? */
- if ((I915_READ(0x130094) & 0xa1) == 0xa1)
- return;
-
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
- POSTING_READ(FORCEWAKE_VLV);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
- udelay(10);
-}
-
-void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
- /* FIXME: confirm VLV behavior with Punit folks */
- POSTING_READ(FORCEWAKE_VLV);
-}
-
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -637,7 +504,7 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
@@ -794,9 +661,9 @@ static int gen6_do_reset(struct drm_device *dev)
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->forcewake_count)
- dev_priv->display.force_wake_get(dev_priv);
+ dev_priv->gt.force_wake_get(dev_priv);
else
- dev_priv->display.force_wake_put(dev_priv);
+ dev_priv->gt.force_wake_put(dev_priv);
/* Restore fifo count */
dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -805,7 +672,7 @@ static int gen6_do_reset(struct drm_device *dev)
return ret;
}
-static int intel_gpu_reset(struct drm_device *dev)
+int intel_gpu_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = -ENODEV;
@@ -863,10 +730,7 @@ int i915_reset(struct drm_device *dev)
if (!i915_try_reset)
return 0;
- if (!mutex_trylock(&dev->struct_mutex))
- return -EBUSY;
-
- dev_priv->stop_rings = 0;
+ mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -909,12 +773,16 @@ int i915_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
ring->init(ring);
+ i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * It would make sense to re-init all the other hw state, at
+ * least the rps/rc6/emon init done within modeset_init_hw. For
+ * some unknown reason, this blows up my ilk, so don't.
+ */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_modeset_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -925,10 +793,12 @@ int i915_reset(struct drm_device *dev)
return 0;
}
-
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct intel_device_info *intel_info =
+ (struct intel_device_info *) ent->driver_data;
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
@@ -937,6 +807,18 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
+ /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
+ * implementation for gen3 (and only gen3) that used legacy drm maps
+ * (gasp!) to share buffers between X and the client. Hence we need to
+ * keep around the fake agp stuff for gen3, even when kms is enabled. */
+ if (intel_info->gen != 3) {
+ driver.driver_features &=
+ ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
+ } else if (!intel_agp_enabled) {
+ DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+ return -ENODEV;
+ }
+
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -1058,7 +940,6 @@ static struct drm_driver driver = {
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .reclaim_buffers = drm_core_reclaim_buffers,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
@@ -1097,11 +978,6 @@ static struct pci_driver i915_pci_driver = {
static int __init i915_init(void)
{
- if (!intel_agp_enabled) {
- DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
- return -ENODEV;
- }
-
driver.num_ioctls = i915_max_ioctl;
/*
@@ -1149,6 +1025,84 @@ MODULE_LICENSE("GPL and additional rights");
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
+static bool IS_DISPLAYREG(u32 reg)
+{
+ /*
+ * This should make it easier to transition modules over to the
+ * new register block scheme, since we can do it incrementally.
+ */
+ if (reg >= 0x180000)
+ return false;
+
+ if (reg >= RENDER_RING_BASE &&
+ reg < RENDER_RING_BASE + 0xff)
+ return false;
+ if (reg >= GEN6_BSD_RING_BASE &&
+ reg < GEN6_BSD_RING_BASE + 0xff)
+ return false;
+ if (reg >= BLT_RING_BASE &&
+ reg < BLT_RING_BASE + 0xff)
+ return false;
+
+ if (reg == PGTBL_ER)
+ return false;
+
+ if (reg >= IPEIR_I965 &&
+ reg < HWSTAM)
+ return false;
+
+ if (reg == MI_MODE)
+ return false;
+
+ if (reg == GFX_MODE_GEN7)
+ return false;
+
+ if (reg == RENDER_HWS_PGA_GEN7 ||
+ reg == BSD_HWS_PGA_GEN7 ||
+ reg == BLT_HWS_PGA_GEN7)
+ return false;
+
+ if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
+ reg == GEN6_BSD_RNCID)
+ return false;
+
+ if (reg == GEN6_BLITTER_ECOSKPD)
+ return false;
+
+ if (reg >= 0x4000c &&
+ reg <= 0x4002c)
+ return false;
+
+ if (reg >= 0x4f000 &&
+ reg <= 0x4f08f)
+ return false;
+
+ if (reg >= 0x4f100 &&
+ reg <= 0x4f11f)
+ return false;
+
+ if (reg >= VLV_MASTER_IER &&
+ reg <= GEN6_PMIER)
+ return false;
+
+ if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
+ reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
+ return false;
+
+ if (reg >= VLV_IIR_RW &&
+ reg <= VLV_ISR)
+ return false;
+
+ if (reg == FORCEWAKE_VLV ||
+ reg == FORCEWAKE_ACK_VLV)
+ return false;
+
+ if (reg == GEN6_GDRST)
+ return false;
+
+ return true;
+}
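
In other words, registers that IS_DISPLAYREG() accepts live at a 0x180000 offset on Valleyview, while ring, fence, interrupt and similar registers stay at their traditional offsets. A self-contained illustration of the offset rule the read/write macros below apply (the helper name is invented for illustration):

#include <stdint.h>

/* Illustrative only: the address translation performed by the VLV branch
 * of the __i915_read/__i915_write macros that follow. */
static inline uint32_t vlv_mmio_offset(uint32_t reg, int is_displayreg)
{
	return is_displayreg ? reg + 0x180000 : reg;
}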
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
@@ -1156,11 +1110,13 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_get(dev_priv); \
+ dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_put(dev_priv); \
+ dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
+ } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
+ val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@@ -1181,7 +1137,11 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- write##y(val, dev_priv->regs + reg); \
+ if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
+ write##y(val, dev_priv->regs + reg + 0x180000); \
+ } else { \
+ write##y(val, dev_priv->regs + reg); \
+ } \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0b676abde0..627fe35781b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,6 +79,10 @@ enum port {
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ if ((intel_encoder)->base.crtc == (__crtc))
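
A hedged usage sketch of the new iterator; dev and crtc are assumed to be the usual drm_device/drm_crtc pointers already in scope in the caller:

/* Illustration: walk every intel_encoder currently wired to a given CRTC. */
struct intel_encoder *encoder;

for_each_encoder_on_crtc(dev, crtc, encoder)
	DRM_DEBUG_KMS("encoder %p feeds this crtc\n", encoder);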
+
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
@@ -176,6 +180,7 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
+ u32 ccid;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
@@ -185,6 +190,7 @@ struct drm_i915_error_state {
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
@@ -261,8 +267,6 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -270,6 +274,11 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
+struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
@@ -284,7 +293,6 @@ struct intel_device_info {
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
- u8 has_pch_split:1;
u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
@@ -309,6 +317,17 @@ struct i915_hw_ppgtt {
dma_addr_t scratch_page_dma_addr;
};
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct i915_hw_context {
+ int id;
+ bool is_initialized;
+ struct drm_i915_file_private *file_priv;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_object *obj;
+};
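
Since DEFAULT_CONTEXT_ID must line up with what userspace historically passed in execbuf2.rsvd1, context selection on submission ends up looking roughly like the sketch below; treat the use of rsvd1 as the context-id carrier as an assumption documented by the comment above, not a quoted uapi excerpt:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hedged sketch: submit an already-prepared execbuffer against a specific
 * HW context. DEFAULT_CONTEXT_ID (0) keeps the old "no user context"
 * behaviour. */
static int submit_with_context(int fd, struct drm_i915_gem_execbuffer2 *eb,
			       unsigned int ctx_id)
{
	eb->rsvd1 = ctx_id;
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb);
}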
+
enum no_fbc_reason {
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
@@ -321,6 +340,7 @@ enum no_fbc_reason {
};
enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
@@ -350,6 +370,8 @@ typedef struct drm_i915_private {
int relative_constants_mode;
void __iomem *regs;
+
+ struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
@@ -652,11 +674,14 @@ typedef struct drm_i915_private {
unsigned long gtt_end;
struct io_mapping *gtt_mapping;
+ phys_addr_t gtt_base_addr;
int gtt_mtrr;
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
+ u32 *l3_remap_info;
+
struct shrinker inactive_shrinker;
/**
@@ -817,6 +842,10 @@ typedef struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
+
+ struct work_struct parity_error_work;
+ bool hw_contexts_disabled;
+ uint32_t hw_context_size;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1026,6 +1055,7 @@ struct drm_i915_file_private {
struct spinlock lock;
struct list_head request_list;
} mm;
+ struct idr context_idr;
};
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1071,7 +1101,8 @@ struct drm_i915_file_private {
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1094,13 +1125,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
@@ -1166,6 +1197,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
+extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -1178,6 +1210,7 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1237,6 +1270,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
@@ -1306,6 +1341,8 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible);
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1315,6 +1352,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -1323,8 +1361,8 @@ int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
-int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno);
+int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1358,6 +1396,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
+/* i915_gem_context.c */
+void i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct intel_ring_buffer *ring,
+ struct drm_file *file, int to_id);
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
@@ -1475,20 +1523,12 @@ extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
-extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
-extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
-
-extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
-extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
/* overlay */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 288d7b8f49a..5c4657a54f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -96,9 +96,18 @@ i915_gem_wait_for_error(struct drm_device *dev)
if (!atomic_read(&dev_priv->mm.wedged))
return 0;
- ret = wait_for_completion_interruptible(x);
- if (ret)
+ /*
+ * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+	 * userspace. If it takes that long, something really bad is going on and
+ * we should simply try to bail out and fail as gracefully as possible.
+ */
+ ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+ if (ret == 0) {
+ DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+ return -EIO;
+ } else if (ret < 0) {
return ret;
+ }
if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
@@ -1122,7 +1131,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1132,6 +1141,11 @@ unlock:
out:
switch (ret) {
case -EIO:
+ /* If this -EIO is due to a gpu hang, give the reset code a
+ * chance to clean up the mess. Otherwise return the proper
+ * SIGBUS. */
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
* objects off the GPU active list. Next time we service the
@@ -1568,6 +1582,21 @@ i915_add_request(struct intel_ring_buffer *ring,
int was_empty;
int ret;
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ if (ring->gpu_caches_dirty) {
+ ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ }
+
BUG_ON(request == NULL);
seqno = i915_gem_next_request_seqno(ring);
@@ -1613,6 +1642,9 @@ i915_add_request(struct intel_ring_buffer *ring,
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
+
+ WARN_ON(!list_empty(&ring->gpu_write_list));
+
return 0;
}
@@ -1827,14 +1859,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
- if (!list_empty(&ring->gpu_write_list)) {
+ if (ring->gpu_caches_dirty) {
struct drm_i915_gem_request *request;
- int ret;
- ret = i915_gem_flush_ring(ring,
- 0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (ret || request == NULL ||
+ if (request == NULL ||
i915_add_request(ring, NULL, request))
kfree(request);
}
@@ -1848,11 +1877,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-static int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible)
{
- BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
@@ -1863,7 +1891,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
recovery_complete = x->done > 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
- return recovery_complete ? -EIO : -EAGAIN;
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
}
return 0;
@@ -1899,34 +1936,85 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
return ret;
}
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number to wait for
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Otherwise returns
+ * the errno, with the remaining time filled into the timeout argument.
+ */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
- bool interruptible)
+ bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int ret = 0;
+ struct timespec before, now, wait_time={1,0};
+ unsigned long timeout_jiffies;
+ long end;
+ bool wait_forever = true;
+ int ret;
if (i915_seqno_passed(ring->get_seqno(ring), seqno))
return 0;
trace_i915_gem_request_wait_begin(ring, seqno);
+
+ if (timeout != NULL) {
+ wait_time = *timeout;
+ wait_forever = false;
+ }
+
+ timeout_jiffies = timespec_to_jiffies(&wait_time);
+
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
+	/* Record current time in case interrupted by signal, or wedged */
+ getrawmonotonic(&before);
+
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
atomic_read(&dev_priv->mm.wedged))
+ do {
+ if (interruptible)
+ end = wait_event_interruptible_timeout(ring->irq_queue,
+ EXIT_COND,
+ timeout_jiffies);
+ else
+ end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+ timeout_jiffies);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- EXIT_COND);
- else
- wait_event(ring->irq_queue, EXIT_COND);
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ end = ret;
+ } while (end == 0 && wait_forever);
+
+ getrawmonotonic(&now);
ring->irq_put(ring);
trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND
- return ret;
+ if (timeout) {
+ struct timespec sleep_time = timespec_sub(now, before);
+ *timeout = timespec_sub(*timeout, sleep_time);
+ }
+
+ switch (end) {
+ case -EIO:
+ case -EAGAIN: /* Wedged */
+ case -ERESTARTSYS: /* Signal */
+ return (int)end;
+ case 0: /* Timeout */
+ if (timeout)
+ set_normalized_timespec(timeout, 0, 0);
+ return -ETIME;
+ default: /* Completed */
+ WARN_ON(end < 0); /* We're not aware of other errors */
+ return 0;
+ }
}
/**
@@ -1934,15 +2022,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
-i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno)
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int ret = 0;
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv);
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
if (ret)
return ret;
@@ -1950,9 +2037,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
- if (atomic_read(&dev_priv->mm.wedged))
- ret = -EAGAIN;
+ ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
return ret;
}
@@ -1975,7 +2060,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+ ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
i915_gem_retire_requests_ring(obj->ring);
@@ -1985,6 +2070,115 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
}
/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->active) {
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(obj->ring,
+ obj->last_rendering_seqno);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests_ring(obj->ring);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: GPU wedged
+ * -ENOMEM: out of memory
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex, the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the busy
+ * ioctl.
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring = NULL;
+ struct timespec timeout_stack, *timeout = NULL;
+ u32 seqno = 0;
+ int ret = 0;
+
+ if (args->timeout_ns >= 0) {
+ timeout_stack = ns_to_timespec(args->timeout_ns);
+ timeout = &timeout_stack;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ if (&obj->base == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
+ }
+
+ /* Need to make sure the object gets inactive eventually. */
+ ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto out;
+
+ if (obj->active) {
+ seqno = obj->last_rendering_seqno;
+ ring = obj->ring;
+ }
+
+ if (seqno == 0)
+ goto out;
+
+ /* Do this after OLR check to make sure we make forward progress polling
+	 * on this IOCTL with a 0 timeout (like the busy ioctl).
+ */
+ if (!args->timeout_ns) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = __wait_seqno(ring, seqno, true, timeout);
+ if (timeout) {
+ WARN_ON(!timespec_valid(timeout));
+ args->timeout_ns = timespec_to_ns(timeout);
+ }
+ return ret;
+
+out:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
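
A minimal userspace sketch of the new wait ioctl, assuming the struct drm_i915_gem_wait layout (bo_handle/flags/timeout_ns) this series adds to i915_drm.h:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hedged sketch: wait up to one second for a buffer to go idle. On -ETIME
 * the object is still busy; timeout_ns is updated with the time remaining.
 * A timeout_ns of 0 mimics the busy ioctl, as described above. */
static int wait_bo_idle(int fd, unsigned int handle)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = 1000000000ll,	/* 1s */
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}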
+
+/**
* i915_gem_object_sync - sync an object to a ring.
*
* @obj: object which may be in use on another ring.
@@ -2160,7 +2354,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+ return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
int i915_gpu_idle(struct drm_device *dev)
@@ -2178,6 +2372,10 @@ int i915_gpu_idle(struct drm_device *dev)
/* Is the device fubar? */
if (WARN_ON(!list_empty(&ring->gpu_write_list)))
return -EBUSY;
+
+ ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+ if (ret)
+ return ret;
}
return 0;
@@ -2364,7 +2562,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
}
if (obj->last_fenced_seqno) {
- ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+ ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
@@ -2551,8 +2749,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (map_and_fenceable)
free_space =
drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
+ size, alignment,
+ 0, dev_priv->mm.gtt_mappable_end,
0);
else
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
@@ -2563,7 +2761,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
+ 0, dev_priv->mm.gtt_mappable_end,
0);
else
obj->gtt_space =
@@ -3030,7 +3228,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, true);
+ ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3199,30 +3397,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- args->busy = obj->active;
- if (args->busy) {
- /* Unconditionally flush objects, even when the gpu still uses this
- * object. Userspace calling this function indicates that it wants to
- * use this buffer rather sooner than later, so issuing the required
- * flush earlier is beneficial.
- */
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- } else {
- ret = i915_gem_check_olr(obj->ring,
- obj->last_rendering_seqno);
- }
+ ret = i915_gem_object_flush_active(obj);
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs
- * are actually unmasked, and our working set ends up being
- * larger than required.
- */
- i915_gem_retire_requests_ring(obj->ring);
-
- args->busy = obj->active;
- }
+ args->busy = obj->active;
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3435,6 +3612,38 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+void i915_gem_l3_remap(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 misccpctl;
+ int i;
+
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ if (!dev_priv->mm.l3_remap_info)
+ return;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+ u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
+ if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+ DRM_DEBUG("0x%x was already programmed to %x\n",
+ GEN7_L3LOG_BASE + i, remap);
+ if (remap && !dev_priv->mm.l3_remap_info[i/4])
+ DRM_DEBUG_DRIVER("Clearing remapped register\n");
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+ }
+
+ /* Make sure all the writes land before disabling dop clock gating */
+ POSTING_READ(GEN7_L3LOG_BASE);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
+
void i915_gem_init_swizzling(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3518,12 +3727,33 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
}
}
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+ if (!HAS_BLT(dev))
+ return false;
+
+ /* The blitter was dysfunctional on early prototypes */
+ if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+ DRM_INFO("BLT not supported on this pre-production hardware;"
+ " graphics performance will be degraded.\n");
+ return false;
+ }
+
+ return true;
+}
+
int
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ if (!intel_enable_gtt())
+ return -EIO;
+
+ i915_gem_l3_remap(dev);
+
i915_gem_init_swizzling(dev);
ret = intel_init_render_ring_buffer(dev);
@@ -3536,7 +3766,7 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_render_ring;
}
- if (HAS_BLT(dev)) {
+ if (intel_enable_blt(dev)) {
ret = intel_init_blt_ring_buffer(dev);
if (ret)
goto cleanup_bsd_ring;
@@ -3544,6 +3774,11 @@ i915_gem_init_hw(struct drm_device *dev)
dev_priv->next_seqno = 1;
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+ i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
new file mode 100644
index 00000000000..da8b01fb1bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright © 2011-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+/*
+ * This file implements HW context support. On gen5+ a HW context consists of an
+ * opaque GPU object which is referenced at times of context saves and restores.
+ * With RC6 enabled, the context is also referenced as the GPU enters and exits
+ * from RC6 (the GPU has its own internal power context, except on gen5). Though
+ * something like a context does exist for the media ring, the code only
+ * supports contexts for the render ring.
+ *
+ * In software, there is a distinction between contexts created by the user,
+ * and the default HW context. The default HW context is used by GPU clients
+ * that do not request setup of their own hardware context. The default
+ * context's state is never restored to help prevent programming errors. This
+ * would happen if a client ran and piggy-backed off another client's GPU state.
+ * The default context only exists to give the GPU some offset to load as the
+ * current to invoke a save of the context we actually care about. In fact, the
+ * code could likely be constructed, albeit in a more complicated fashion, to
+ * never use the default context, though that limits the driver's ability to
+ * swap out, and/or destroy other contexts.
+ *
+ * All other contexts are created as a request by the GPU client. These contexts
+ * store GPU state, and thus allow GPU clients to not re-emit state (and
+ * potentially query certain state) at any time. The kernel driver makes
+ * certain that the appropriate commands are inserted.
+ *
+ * The context life cycle is semi-complicated in that context BOs may live
+ * longer than the context itself because of the way the hardware and object
+ * tracking work. Below is a very crude representation of the state machine
+ * describing the context life.
+ * refcount pincount active
+ * S0: initial state 0 0 0
+ * S1: context created 1 0 0
+ * S2: context is currently running 2 1 X
+ * S3: GPU referenced, but not current 2 0 1
+ * S4: context is current, but destroyed 1 1 0
+ * S5: like S3, but destroyed 1 0 1
+ *
+ * The most common (but not all) transitions:
+ * S0->S1: client creates a context
+ * S1->S2: client submits execbuf with context
+ * S2->S3: another client submits an execbuf with a different context
+ * S3->S1: context object was retired
+ * S3->S2: client submits another execbuf with the same context
+ * S2->S4: context destroy called with current context
+ * S3->S5->S0: destroy path
+ * S4->S5->S0: destroy path on current context
+ *
+ * There are two confusing terms used above:
+ * The "current context" means the context which is currently running on the
+ * GPU. The GPU has loaded it's state already and has stored away the gtt
+ * offset of the BO. The GPU is not actively referencing the data at this
+ * offset, but it will on the next context switch. The only way to avoid this
+ * is to do a GPU reset.
+ *
+ * An "active context' is one which was previously the "current context" and is
+ * on the active list waiting for the next context switch to occur. Until this
+ * happens, the object must remain at the same gtt offset. It is therefore
+ * possible to destroy a context, but it is still active.
+ *
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future-proof as is.
+ */
+#define CONTEXT_ALIGN (64<<10)
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+static int do_switch(struct drm_i915_gem_object *from_obj,
+ struct i915_hw_context *to, u32 seqno);
+
+static int get_context_size(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ u32 reg;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ reg = I915_READ(CXT_SIZE);
+ ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ case 7:
+ reg = I915_READ(GEN7_CXT_SIZE);
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static void do_destroy(struct i915_hw_context *ctx)
+{
+ struct drm_device *dev = ctx->obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (ctx->file_priv)
+ idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ else
+ BUG_ON(ctx != dev_priv->ring[RCS].default_context);
+
+ drm_gem_object_unreference(&ctx->obj->base);
+ kfree(ctx);
+}
+
+static struct i915_hw_context *
+create_hw_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_context *ctx;
+ int ret, id;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ if (ctx->obj == NULL) {
+ kfree(ctx);
+ DRM_DEBUG_DRIVER("Context object allocated failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* The ring associated with the context object is handled by the normal
+ * object tracking code. We give an initial ring value simply to pass an
+ * assertion in the context switch code.
+ */
+ ctx->ring = &dev_priv->ring[RCS];
+
+ /* Default context will never have a file_priv */
+ if (file_priv == NULL)
+ return ctx;
+
+ ctx->file_priv = file_priv;
+
+again:
+ if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ DRM_DEBUG_DRIVER("idr allocation failed\n");
+ goto err_out;
+ }
+
+ ret = idr_get_new_above(&file_priv->context_idr, ctx,
+ DEFAULT_CONTEXT_ID + 1, &id);
+ if (ret == 0)
+ ctx->id = id;
+
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ goto err_out;
+
+ return ctx;
+
+err_out:
+ do_destroy(ctx);
+ return ERR_PTR(ret);
+}
+
+static inline bool is_default_context(struct i915_hw_context *ctx)
+{
+ return (ctx == ctx->ring->default_context);
+}
+
+/**
+ * The default context needs to exist per ring that uses contexts. It stores the
+ * context state of the GPU for applications that don't utilize HW contexts, as
+ * well as for the idle case.
+ */
+static int create_default_context(struct drm_i915_private *dev_priv)
+{
+ struct i915_hw_context *ctx;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+ ctx = create_hw_context(dev_priv->dev, NULL);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ /* We may need to do things with the shrinker which require us to
+ * immediately switch back to the default context. This can cause a
+ * problem as pinning the default context also requires GTT space which
+ * may not be available. To avoid this we always pin the
+ * default context.
+ */
+ dev_priv->ring[RCS].default_context = ctx;
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+ if (ret) {
+ do_destroy(ctx);
+ return ret;
+ }
+
+ ret = do_switch(NULL, ctx, 0);
+ if (ret) {
+ i915_gem_object_unpin(ctx->obj);
+ do_destroy(ctx);
+ } else {
+ DRM_DEBUG_DRIVER("Default HW context loaded\n");
+ }
+
+ return ret;
+}
+
+void i915_gem_context_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ctx_size;
+
+ if (!HAS_HW_CONTEXTS(dev)) {
+ dev_priv->hw_contexts_disabled = true;
+ return;
+ }
+
+ /* If called from reset, or thaw... we've been here already */
+ if (dev_priv->hw_contexts_disabled ||
+ dev_priv->ring[RCS].default_context)
+ return;
+
+ ctx_size = get_context_size(dev);
+ if (ctx_size == 0 || ctx_size > (1<<20)) {
+ dev_priv->hw_contexts_disabled = true;
+ return;
+ }
+
+ dev_priv->hw_context_size = round_up(ctx_size, 4096);
+
+ if (create_default_context(dev_priv)) {
+ dev_priv->hw_contexts_disabled = true;
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("HW context support initialized\n");
+}
+
+void i915_gem_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->hw_contexts_disabled)
+ return;
+
+ /* The only known way to stop the gpu from accessing the hw context is
+ * to reset it. Do this as the very last operation to avoid confusing
+ * other code, leading to spurious errors. */
+ intel_gpu_reset(dev);
+
+ i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
+
+ do_destroy(dev_priv->ring[RCS].default_context);
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+ struct i915_hw_context *ctx = p;
+
+ BUG_ON(id == DEFAULT_CONTEXT_ID);
+
+ do_destroy(ctx);
+
+ return 0;
+}
+
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ idr_destroy(&file_priv->context_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+ return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+}
+
+static inline int
+mi_set_context(struct intel_ring_buffer *ring,
+ struct i915_hw_context *new_context,
+ u32 hw_flags)
+{
+ int ret;
+
+ /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
+ * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
+ * explicitly, so we rely on the value at ring init, stored in
+ * itlb_before_ctx_switch.
+ */
+ if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+ ret = ring->flush(ring, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ if (IS_GEN7(ring->dev))
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, new_context->obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ hw_flags);
+ /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+ intel_ring_emit(ring, MI_NOOP);
+
+ if (IS_GEN7(ring->dev))
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return ret;
+}
+
+static int do_switch(struct drm_i915_gem_object *from_obj,
+ struct i915_hw_context *to,
+ u32 seqno)
+{
+ struct intel_ring_buffer *ring = NULL;
+ u32 hw_flags = 0;
+ int ret;
+
+ BUG_ON(to == NULL);
+ BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
+
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+ if (ret)
+ return ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ * that thanks to write = false in this call and us not setting any gpu
+ * write domains when putting a context object onto the active list
+ * (when switching away from it), this won't block.
+ * XXX: We need a real interface to do this instead of trickery. */
+ ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+ return ret;
+ }
+
+ if (!to->obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+
+ if (!to->is_initialized || is_default_context(to))
+ hw_flags |= MI_RESTORE_INHIBIT;
+ else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
+ hw_flags |= MI_FORCE_RESTORE;
+
+ ring = to->ring;
+ ret = mi_set_context(ring, to, hw_flags);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+ return ret;
+ }
+
+ /* The hardware is finished with the backing object for the context only
+ * after switching to the *next* context. Therefore we cannot retire the
+ * previous context until the next context has started running. In fact,
+ * the below code is a bit suboptimal because the retiring could occur
+ * right after the MI_SET_CONTEXT instead of when the next seqno has
+ * completed.
+ */
+ if (from_obj != NULL) {
+ from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_gem_object_move_to_active(from_obj, ring, seqno);
+ /* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
+ * whole damn pipeline, we don't need to explicitly mark the
+ * object dirty. The only exception is that the context must be
+ * correct in case the object gets swapped out. Ideally we'd be
+ * able to defer doing this until we know the object would be
+ * swapped, but there is no way to do that yet.
+ */
+ from_obj->dirty = 1;
+ BUG_ON(from_obj->ring != to->ring);
+ i915_gem_object_unpin(from_obj);
+
+ drm_gem_object_unreference(&from_obj->base);
+ }
+
+ drm_gem_object_reference(&to->obj->base);
+ ring->last_context_obj = to->obj;
+ to->is_initialized = true;
+
+ return 0;
+}
+
+/**
+ * i915_switch_context() - perform a GPU context switch.
+ * @ring: ring for which we'll execute the context switch
+ * @file: drm file associated with the context, may be NULL
+ * @to_id: id of the context to switch to
+ *
+ * The context life cycle is simple. The context refcount is incremented and
+ * decremented by 1 on create and destroy. If the context is in use by the GPU,
+ * it will have a refcount > 1. This allows us to destroy the context abstract
+ * object while letting the normal object tracking destroy the backing BO.
+ */
+int i915_switch_context(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ int to_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_file_private *file_priv = NULL;
+ struct i915_hw_context *to;
+ struct drm_i915_gem_object *from_obj = ring->last_context_obj;
+ int ret;
+
+ if (dev_priv->hw_contexts_disabled)
+ return 0;
+
+ if (ring != &dev_priv->ring[RCS])
+ return 0;
+
+ if (file)
+ file_priv = file->driver_priv;
+
+ if (to_id == DEFAULT_CONTEXT_ID) {
+ to = ring->default_context;
+ } else {
+ to = i915_gem_context_get(file_priv, to_id);
+ if (to == NULL)
+ return -ENOENT;
+ }
+
+ if (from_obj == to->obj)
+ return 0;
+
+ return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_context_create *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *ctx;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ if (dev_priv->hw_contexts_disabled)
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = create_hw_context(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ args->ctx_id = ctx->id;
+ DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+
+ return 0;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *ctx;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (!ctx) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
+ }
+
+ do_destroy(ctx);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+ return 0;
+}
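For reference, this is roughly how a client would exercise the two new ioctls. The sketch assumes the DRM_IOCTL_I915_GEM_CONTEXT_CREATE/_DESTROY numbers and struct layouts added to i915_drm.h elsewhere in this series (that hunk is not shown here); error handling is trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    int main(void)
    {
            struct drm_i915_gem_context_create create = { 0 };
            struct drm_i915_gem_context_destroy destroy = { 0 };
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
                    return 1;

            /* create.ctx_id is now above DEFAULT_CONTEXT_ID (0) and can be
             * passed to execbuffer2; id 0 always means the default context. */
            printf("got context %u\n", create.ctx_id);

            destroy.ctx_id = create.ctx_id;
            ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
            return 0;
    }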
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index a4f6aaabca9..bddf7bed183 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -132,7 +132,8 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
+ gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ae7c24e12e5..eba0308f10e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -78,11 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
- alignment, 0,
- dev_priv->mm.gtt_mappable_end);
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ 0, dev_priv->mm.gtt_mappable_end);
else
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ drm_mm_init_scan(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
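The extra zero arguments are the allocation "color" added by the drm_mm range-allocator colouring work; i915 passes 0 since it does not use colouring yet. Conceptually, a color lets the allocator keep guard space between adjacent nodes of different colors. A toy sketch of the idea follows; it is not the drm_mm implementation, just an illustration of how a color-aware hole check behaves:

    #include <stdbool.h>
    #include <stdio.h>

    struct node { unsigned long start, size, color; };

    /* Toy colouring rule: if the node before a hole has a different color
     * than the allocation being placed, keep a one-page guard between them,
     * shrinking the usable hole accordingly.
     */
    static bool hole_fits(const struct node *prev, unsigned long color,
                          unsigned long hole_start, unsigned long hole_end,
                          unsigned long size)
    {
            const unsigned long guard = 4096;

            if (prev && prev->color != color)
                    hole_start += guard;
            return hole_end > hole_start && hole_end - hole_start >= size;
    }

    int main(void)
    {
            struct node prev = { .start = 0, .size = 4096, .color = 1 };

            /* Same color: the whole [4096, 16384) hole is usable. */
            printf("%d\n", hole_fits(&prev, 1, 4096, 16384, 12288));
            /* Different color: a guard page eats into the hole. */
            printf("%d\n", hole_fits(&prev, 2, 4096, 16384, 12288));
            return 0;
    }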
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 974a9f1068a..5af631e788c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -810,33 +810,16 @@ err:
return ret;
}
-static int
+static void
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
+ uint32_t flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i, ret;
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- for (i = 0; i < I915_NUM_RINGS; i++)
- if (flush_rings & (1 << i)) {
- ret = i915_gem_flush_ring(&dev_priv->ring[i],
- invalidate_domains,
- flush_domains);
- if (ret)
- return ret;
- }
- }
-
- return 0;
}
static int
@@ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
- ret = i915_gem_execbuffer_flush(ring->dev,
- cd.invalidate_domains,
- cd.flush_domains,
- cd.flush_rings);
- if (ret)
- return ret;
+ i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains);
}
if (cd.flips) {
@@ -905,6 +885,16 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
}
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ ret = i915_gem_flush_ring(ring,
+ I915_GEM_GPU_DOMAINS,
+ ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
return 0;
}
@@ -983,26 +973,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
- u32 invalidate;
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires.
- *
- * The sampler always gets flushed on i965 (sigh).
- */
- invalidate = I915_GEM_DOMAIN_COMMAND;
- if (INTEL_INFO(dev)->gen >= 4)
- invalidate |= I915_GEM_DOMAIN_SAMPLER;
- if (ring->flush(ring, invalidate, 0)) {
- i915_gem_next_request_seqno(ring);
- return;
- }
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
- i915_gem_next_request_seqno(ring);
kfree(request);
}
}
@@ -1044,6 +1021,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
@@ -1065,9 +1043,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1240,6 +1228,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ ret = i915_switch_context(ring, file, ctx_id);
+ if (ret)
+ goto err;
+
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
@@ -1367,6 +1359,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
+ i915_execbuffer2_set_context_id(exec2, 0);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
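The context id travels to the kernel in the rsvd1 field of drm_i915_gem_execbuffer2, via the i915_execbuffer2_get/set_context_id() helpers seen above. A hedged userspace sketch (buffer and relocation setup elided; submit_with_context is a made-up helper name):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int submit_with_context(int fd,
                                   struct drm_i915_gem_exec_object2 *objs,
                                   int count, uint32_t ctx_id)
    {
            struct drm_i915_gem_execbuffer2 execbuf;

            memset(&execbuf, 0, sizeof(execbuf));
            execbuf.buffers_ptr = (uintptr_t)objs;
            execbuf.buffer_count = count;
            /* Only the render ring accepts a context; BSD/BLT return
             * -EPERM for a nonzero ctx_id, per the hunk above. */
            execbuf.flags = I915_EXEC_RENDER;
            i915_execbuffer2_set_context_id(execbuf, ctx_id);

            return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }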
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ed3224c3742..8a3828528b9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -375,6 +375,86 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
+
+/**
+ * ivybridge_parity_work - workqueue function called when a parity error
+ * interrupt occurs
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows, since statistically
+ * the same row is more likely to go bad again.
+ */
+static void ivybridge_parity_work(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ parity_error_work);
+ u32 error_status, row, bank, subbank;
+ char *parity_event[5];
+ uint32_t misccpctl;
+ unsigned long flags;
+
+ /* We must turn off DOP level clock gating to access the L3 registers.
+ * In order to prevent a get/put style interface, acquire struct mutex
+ * any time we access those registers.
+ */
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ error_status = I915_READ(GEN7_L3CDERRST1);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
+ GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(GEN7_L3CDERRST1);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = NULL;
+
+ kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
+ KOBJ_CHANGE, parity_event);
+
+ DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
+ row, bank, subbank);
+
+ kfree(parity_event[3]);
+ kfree(parity_event[2]);
+ kfree(parity_event[1]);
+}
+
+static void ivybridge_handle_parity_error(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long flags;
+
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+}
+
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
@@ -394,6 +474,9 @@ static void snb_gt_irq_handler(struct drm_device *dev,
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
}
+
+ if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+ ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
@@ -429,15 +512,10 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
bool blc_event;
atomic_inc(&dev_priv->irq_received);
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS;
-
while (true) {
iir = I915_READ(VLV_IIR);
gt_iir = I915_READ(GTIIR);
@@ -467,6 +545,16 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+ }
+
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
@@ -481,19 +569,6 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
I915_READ(PORT_HOTPLUG_STAT);
}
-
- if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
- drm_handle_vblank(dev, 0);
- vblank++;
- intel_finish_page_flip(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
- drm_handle_vblank(dev, 1);
- vblank++;
- intel_finish_page_flip(dev, 0);
- }
-
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -991,6 +1066,7 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -1104,6 +1180,7 @@ static void i915_capture_error_state(struct drm_device *dev)
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->ccid = I915_READ(CCID);
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1426,23 +1503,20 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- u32 dpfl, imr;
+ u32 imr;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dpfl = I915_READ(VLV_DPFLIPSTAT);
imr = I915_READ(VLV_IMR);
- if (pipe == 0) {
- dpfl |= PIPEA_VBLANK_INT_EN;
+ if (pipe == 0)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- } else {
- dpfl |= PIPEA_VBLANK_INT_EN;
+ else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- }
- I915_WRITE(VLV_DPFLIPSTAT, dpfl);
I915_WRITE(VLV_IMR, imr);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1492,20 +1566,17 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- u32 dpfl, imr;
+ u32 imr;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dpfl = I915_READ(VLV_DPFLIPSTAT);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
- if (pipe == 0) {
- dpfl &= ~PIPEA_VBLANK_INT_EN;
+ if (pipe == 0)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- } else {
- dpfl &= ~PIPEB_VBLANK_INT_EN;
+ else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- }
I915_WRITE(VLV_IMR, imr);
- I915_WRITE(VLV_DPFLIPSTAT, dpfl);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1648,7 +1719,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(HWSTAM, 0xeffe);
/* XXX hotplug from PCH */
@@ -1811,13 +1881,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
- dev_priv->gt_irq_mask = ~0;
+ dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
- GEN6_BLITTER_USER_INTERRUPT;
+ GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1840,16 +1910,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 render_irqs;
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- dev_priv->irq_mask = ~enable_mask;
+ /*
+ * Leave vblank interrupts masked initially; the enable/disable
+ * hooks will toggle them based on usage.
+ */
+ dev_priv->irq_mask = (~enable_mask) |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -1868,26 +1946,27 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
+ i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
- GT_GEN6_BLT_CS_ERROR_INTERRUPT |
- GT_GEN6_BLT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_GEN6_BSD_CS_ERROR_INTERRUPT |
- GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
- GT_PIPE_NOTIFY |
- GT_RENDER_CS_ERROR_INTERRUPT |
- GT_SYNC_STATUS |
- GT_USER_INTERRUPT;
-
- dev_priv->gt_irq_mask = ~render_irqs;
+ dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, 0);
- I915_WRITE(GTIER, render_irqs);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@@ -2166,9 +2245,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
@@ -2328,10 +2407,8 @@ static void i965_irq_preinstall(struct drm_device * dev)
atomic_set(&dev_priv->irq_received, 0);
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
@@ -2344,11 +2421,13 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2364,13 +2443,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
- if (I915_HAS_HOTPLUG(dev)) {
- /* Enable in IER... */
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- /* and unmask in IMR */
- dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
- }
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
@@ -2390,36 +2462,40 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
- if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ /* Note HDMI and DP share hotplug bits */
+ hotplug_en = 0;
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (IS_G4X(dev)) {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
- hotplug_en |= CRT_HOTPLUG_INT_EN;
+ } else {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ }
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
- /* Programming the CRT detection parameters tends
- to generate a spurious hotplug event about three
- seconds later. So just do it once.
- */
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- }
+ /* Programming the CRT detection parameters tends
+ to generate a spurious hotplug event about three
+ seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
- /* Ignore TV since it's buggy */
+ /* Ignore TV since it's buggy */
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- }
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
intel_opregion_enable_asle(dev);
@@ -2477,8 +2553,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
@@ -2551,10 +2626,8 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
@@ -2575,6 +2648,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
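The row/bank/subbank reported in the uevent come straight out of GEN7_L3CDERRST1. A standalone rendering of the field extraction, mirroring the GEN7_PARITY_ERROR_* macros added to i915_reg.h later in this patch (the sample register value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define L3CDERRST1_ROW_MASK     (0x7ff << 14)
    #define L3CDERRST1_BANK_MASK    (3 << 11)
    #define L3CDERRST1_SUBBANK_MASK (7 << 8)

    int main(void)
    {
            uint32_t error_status = 0x00a05b00;  /* hypothetical raw readback */
            uint32_t row = (error_status & L3CDERRST1_ROW_MASK) >> 14;
            uint32_t bank = (error_status & L3CDERRST1_BANK_MASK) >> 11;
            uint32_t subbank = (error_status & L3CDERRST1_SUBBANK_MASK) >> 8;

            /* These are the same three values the driver packs into the
             * ROW=/BANK=/SUBBANK= uevent strings. */
            printf("row=%u bank=%u subbank=%u\n", row, bank, subbank);
            return 0;
    }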
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 48d5e8e051c..acc99b21e0b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -217,6 +217,9 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
@@ -299,6 +302,7 @@
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
@@ -686,10 +690,10 @@
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
+#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
+#define GEN6_BSD_GO_INDICATOR (1 << 4)
#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
@@ -908,6 +912,7 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
@@ -1453,6 +1458,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
#define GEN6_GT_PERF_STATUS 0x145948
#define GEN6_RP_STATE_LIMITS 0x145994
#define GEN6_RP_STATE_CAP 0x145998
@@ -1462,6 +1471,31 @@
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
+#define CXT_SIZE 0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
+ GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE 0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
+ GEN7_CXT_RING_SIZE(ctx_reg) + \
+ GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_GT1_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
/*
* Overlay regs
*/
@@ -1566,20 +1600,34 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT 0x61114
-#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
-#define DPB_HOTPLUG_INT_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
-#define DPC_HOTPLUG_INT_STATUS (1 << 28)
-#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+/* HDMI/DP bits are gen4+ */
+#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define DPD_HOTPLUG_INT_STATUS (3 << 21)
+#define DPC_HOTPLUG_INT_STATUS (3 << 19)
+#define DPB_HOTPLUG_INT_STATUS (3 << 17)
+/* HDMI bits are shared with the DP bits */
+#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
+#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
+#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
+#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
+/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
/* SDVO port control */
#define SDVOB 0x61140
@@ -1711,8 +1759,10 @@
#define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
@@ -1723,7 +1773,11 @@
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
@@ -1795,18 +1849,35 @@
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
-#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL 0x61254
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
*
* The actual value is this field multiplied by two.
*/
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -1816,9 +1887,24 @@
*/
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
#define BLC_HIST_CTL 0x61260
+/* New registers for PCH-split platforms. Except where new bits show up, the
+ * register layout matches the gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 0x48250
+#define BLC_PWM_CPU_CTL 0x48254
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 0xc8254
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -2583,13 +2669,13 @@
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define VLV_DPFLIPSTAT 0x70028
-#define PIPEB_LINE_COMPARE_STATUS (1<<29)
+#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
#define SPRITED_FLIPDONE_INT_EN (1<<26)
#define SPRITEC_FLIPDONE_INT_EN (1<<25)
#define PLANEB_FLIPDONE_INT_EN (1<<24)
-#define PIPEA_LINE_COMPARE_STATUS (1<<21)
+#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIPDONE_INT_EN (1<<18)
@@ -2897,13 +2983,14 @@
#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
- (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+ (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
#define SWF00 0x71410
@@ -3771,6 +3858,9 @@
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -3824,7 +3914,6 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
-#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3855,20 +3944,18 @@
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
-#define BLC_PWM_CPU_CTL2 0x48250
-#define PWM_ENABLE (1 << 31)
-#define PWM_PIPE_A (0 << 29)
-#define PWM_PIPE_B (1 << 29)
-#define BLC_PWM_CPU_CTL 0x48254
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS 0x61200
+#define PIPEA_PP_CONTROL 0x61204
+#define PIPEA_PP_ON_DELAYS 0x61208
+#define PIPEA_PP_OFF_DELAYS 0x6120c
+#define PIPEA_PP_DIVISOR 0x61210
-#define BLC_PWM_PCH_CTL1 0xc8250
-#define PWM_PCH_ENABLE (1 << 31)
-#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
-#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
-#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
-#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
-
-#define BLC_PWM_PCH_CTL2 0xc8254
+#define PIPEB_PP_STATUS 0x61300
+#define PIPEB_PP_CONTROL 0x61304
+#define PIPEB_PP_ON_DELAYS 0x61308
+#define PIPEB_PP_OFF_DELAYS 0x6130c
+#define PIPEB_PP_DIVISOR 0x61310
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@@ -3992,6 +4079,7 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
+#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@@ -4012,10 +4100,15 @@
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN7_UCGCTL4 0x940c
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4047,6 +4140,7 @@
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4111,6 +4205,26 @@
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define GEN7_MISCCPCTL (0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
+#define GEN7_PARITY_ERROR_VALID (1<<13)
+#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+ ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+ ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+ ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1<<7)
+
+#define GEN7_L3LOG_BASE 0xB070
+#define GEN7_L3LOG_SIZE 0x80
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4177,7 +4291,7 @@
PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (0xf<<28)
+#define PIPE_DDI_PORT_MASK (7<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
@@ -4335,7 +4449,7 @@
#define PIPE_WM_LINETIME_B 0x45274
#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
PIPE_WM_LINETIME_A, \
- PIPE_WM_LINETIME_A)
+ PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
@@ -4347,4 +4461,9 @@
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+#define WM_DBG 0x45280
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
+#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
+#define WM_DBG_DISALLOW_SPRITE (1<<2)
+
#endif /* _I915_REG_H_ */
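To make the GEN6_CXT_TOTAL_SIZE arithmetic concrete: CXT_SIZE packs five 6-bit sub-sizes, get_context_size() sums them and multiplies by 64, and i915_gem_context_init() rounds the result up to a page. A worked example with a hypothetical register readback:

    #include <stdio.h>
    #include <stdint.h>

    /* Each CXT_SIZE field is 6 bits wide, at shifts 24/18/12/6/0. */
    static uint32_t field(uint32_t reg, int shift)
    {
            return (reg >> shift) & 0x3f;
    }

    int main(void)
    {
            uint32_t reg = 0x04104144;  /* hypothetical CXT_SIZE value */
            uint32_t total = field(reg, 24) + field(reg, 18) +
                             field(reg, 12) + field(reg, 6) + field(reg, 0);
            uint32_t bytes = total * 64;                 /* get_context_size() */
            uint32_t object = (bytes + 4095) & ~4095u;   /* round_up(, 4096)   */

            printf("context image: %u bytes -> %u byte object\n", bytes, object);
            return 0;
    }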
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a748e5cabe1..4776ccf1b3c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -828,10 +828,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_drps(dev);
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_disable_rps(dev);
+ intel_disable_gt_powersave(dev);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 79f83445afa..2f5388af8df 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include "intel_drv.h"
#include "i915_drv.h"
static u32 calc_residency(struct drm_device *dev, const u32 reg)
@@ -92,20 +93,134 @@ static struct attribute_group rc6_attr_group = {
.attrs = rc6_attrs
};
-void i915_setup_sysfs(struct drm_device *dev)
+static int l3_access_valid(struct drm_device *dev, loff_t offset)
+{
+ if (!IS_IVYBRIDGE(dev))
+ return -EPERM;
+
+ if (offset % 4 != 0)
+ return -EINVAL;
+
+ if (offset >= GEN7_L3LOG_SIZE)
+ return -ENXIO;
+
+ return 0;
+}
+
+static ssize_t
+i915_l3_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_device *drm_dev = dminor->dev;
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ uint32_t misccpctl;
+ int i, ret;
+
+ ret = l3_access_valid(drm_dev, offset);
+ if (ret)
+ return ret;
+
+ ret = i915_mutex_lock_interruptible(drm_dev);
+ if (ret)
+ return ret;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+ for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
+ *((uint32_t *)(&buf[i - offset])) = I915_READ(GEN7_L3LOG_BASE + i);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ return i - offset;
+}
+
+static ssize_t
+i915_l3_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_device *drm_dev = dminor->dev;
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ u32 *temp = NULL; /* Just here to make handling failures easy */
int ret;
- /* ILK doesn't have any residency information */
- if (INTEL_INFO(dev)->gen < 6)
- return;
+ ret = l3_access_valid(drm_dev, offset);
+ if (ret)
+ return ret;
- ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
- DRM_ERROR("sysfs setup failed\n");
+ return ret;
+
+ if (!dev_priv->mm.l3_remap_info) {
+ temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return -ENOMEM;
+ }
+ }
+
+ ret = i915_gpu_idle(drm_dev);
+ if (ret) {
+ kfree(temp);
+ mutex_unlock(&drm_dev->struct_mutex);
+ return ret;
+ }
+
+ /* TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
+ if (temp)
+ dev_priv->mm.l3_remap_info = temp;
+
+ memcpy(dev_priv->mm.l3_remap_info + (offset/4), buf, count);
+
+ i915_gem_l3_remap(drm_dev);
+
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute dpf_attrs = {
+ .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
+ .size = GEN7_L3LOG_SIZE,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
+ .mmap = NULL
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+ &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("RC6 residency sysfs setup failed\n");
+ }
+
+ if (IS_IVYBRIDGE(dev)) {
+ ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+ if (ret)
+ DRM_ERROR("l3 parity sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
}
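From userspace, the new file appears as /sys/class/drm/card0/l3_parity (assuming card0 is the Ivy Bridge device). Accesses must honor l3_access_valid(): 4-byte-aligned offsets below the 0x80-byte log size. A small read sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
            uint32_t log[0x80 / 4];  /* GEN7_L3LOG_SIZE worth of entries */
            int fd = open("/sys/class/drm/card0/l3_parity", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (pread(fd, log, sizeof(log), 0) != sizeof(log))
                    return 1;

            for (unsigned i = 0; i < sizeof(log) / 4; i++)
                    if (log[i])
                            printf("remap entry at %#x: %#x\n", i * 4, log[i]);
            close(fd);
            return 0;
    }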
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dac7bba4d9d..fe90b3a84a6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -311,9 +311,33 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_ARGS(ring, seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
+TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_ARGS(ring, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ring)
+ __field(u32, seqno)
+ __field(bool, blocking)
+ ),
+
+ /* NB: the blocking information is racy since mutex_is_locked
+ * doesn't check that the current thread holds the lock. The only
+ * other option would be to pass down through the stack a boolean
+ * indicating whether or not the caller was blocking, which is
+ * less desirable.
+ */
+ TP_fast_assign(
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->seqno = seqno;
+ __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+ ),
+
+ TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring, __entry->seqno,
+ __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
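Like any TRACE_EVENT, the reworked wait_begin tracepoint can be toggled through tracefs; a minimal sketch, assuming the conventional debugfs mount point:

    #include <stdio.h>

    int main(void)
    {
            const char *p = "/sys/kernel/debug/tracing/events/i915/"
                            "i915_gem_request_wait_begin/enable";
            FILE *f = fopen(p, "w");

            if (!f)
                    return 1;
            fputs("1", f);  /* then read .../tracing/trace_pipe for output */
            fclose(f);
            return 0;
    }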
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 353459362f6..8c6074154bf 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -692,7 +692,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
*
* Returns 0 on success, nonzero on failure.
*/
-bool
+int
intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index dbda6e3bdf0..31c2107e782 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -476,7 +476,7 @@ struct bdb_edp {
} __attribute__ ((packed));
void intel_setup_bios(struct drm_device *dev);
-bool intel_parse_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 75a70c46ef1..7ed4a41c396 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,6 +88,9 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
+ if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
@@ -129,7 +132,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -230,6 +233,42 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+ u32 save_adpa;
+
+ save_adpa = adpa = I915_READ(ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+ I915_WRITE(ADPA, adpa);
+
+ if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000)) {
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ I915_WRITE(ADPA, save_adpa);
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+
+ DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ /* FIXME: debug force function and remove */
+ ret = true;
+
+ return ret;
+}
+
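Aside: a self-contained sketch (not part of the patch) of the trigger-and-poll shape used above: set a self-clearing force bit, poll for completion with a bounded wait, and restore the saved value on timeout. The register is simulated with a plain variable and the bit position is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FORCE_TRIGGER (1u << 16)      /* illustrative bit position */

static uint32_t adpa;                 /* simulated ADPA register */

static bool wait_for_clear(uint32_t mask, int max_polls)
{
	while (max_polls--) {
		adpa &= ~mask;        /* hardware self-clears; simulated */
		if (!(adpa & mask))
			return true;
	}
	return false;
}

int main(void)
{
	uint32_t save = adpa;

	adpa |= FORCE_TRIGGER;
	if (!wait_for_clear(FORCE_TRIGGER, 1000))
		adpa = save;          /* timeout: undo the forced trigger */

	printf("adpa=0x%08x\n", adpa);
	return 0;
}
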
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
@@ -249,6 +288,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
+ if (IS_VALLEYVIEW(dev))
+ return valleyview_crt_detect_hotplug(connector);
+
/*
* On 4 series desktop, CRT detect sequence need to be done twice
* to get a reliable result.
@@ -288,39 +330,34 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c;
- /* CRT should always be at 0, but check anyway */
- if (crt->base.type != INTEL_OUTPUT_ANALOG)
- return false;
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = drm_get_edid(connector, i2c);
- if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
- struct edid *edid;
- bool is_digital = false;
- struct i2c_adapter *i2c;
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
- edid = drm_get_edid(connector, i2c);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
- *
- * On the other hand, what should we do if it is a broken EDID?
*/
- if (edid != NULL) {
- is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
- connector->display_info.raw_edid = NULL;
- kfree(edid);
- }
-
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
- } else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
+
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
+ kfree(edid);
+
return false;
}
@@ -453,18 +490,27 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
if (I915_HAS_HOTPLUG(dev)) {
+ /* We cannot rely on the HPD pin always being correctly wired
+ * up; for example, many KVM switches do not pass it through, so
+ * we only trust a positive assertion that the monitor is connected.
+ */
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- } else {
+ } else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
- return connector_status_disconnected;
- }
}
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
+ /* Load detection is broken on HPD-capable machines. Whoever wants a
+ * broken monitor (without EDID) to work behind a broken KVM (that fails
+ * to have the right resistors for hotplug detection) needs to fix this
+ * up. For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev))
+ return connector_status_disconnected;
+
if (!force)
return connector->status;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 46d1e886c69..933c7485917 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -170,6 +170,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
udelay(600);
+ /* We need to program FDI_RX_MISC with the default TP1 to TP2
+ * values before enabling the receiver, and configure the delay
+ * for the FDI timing generator to 90h. Luckily, all the other
+ * bits are supposed to be zeroed, so we can write those values
+ * directly.
+ */
+ I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
+ FDI_RX_FDI_DELAY_90);
+
/* Enable CPU FDI Receiver with auto-training */
reg = FDI_RX_CTL(pipe);
I915_WRITE(reg,
@@ -726,8 +735,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a8538ac0299..f6159765f1e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -98,6 +98,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
@@ -359,6 +364,48 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+static const intel_limit_t intel_limits_vlv_dac = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_hdmi = {
+ .dot = { .min = 20000, .max = 165000 },
+ .vco = { .min = 4000000, .max = 5994000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_dp = {
+ .dot = { .min = 162000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 5994000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
unsigned long flags;
@@ -384,6 +431,28 @@ out_unlock:
return val;
}
+static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
+ u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_DATA, val);
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
+ DPIO_BYTE);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+ DRM_ERROR("DPIO write wait timed out\n");
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
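Aside: a toy model (not part of the patch) of the mailbox handshake intel_dpio_write() follows: wait for BUSY to clear, load data and register select, kick the packet register, then wait for completion. The register number and the simulated completion are assumptions of the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DPIO_BUSY (1u << 0)

static uint32_t pkt, data_reg, reg_sel;   /* simulated mailbox registers */

static bool wait_not_busy(int max_polls)
{
	while (max_polls--) {
		pkt &= ~DPIO_BUSY;        /* hardware completes; simulated */
		if (!(pkt & DPIO_BUSY))
			return true;
	}
	return false;
}

static int dpio_write(uint32_t reg, uint32_t val)
{
	if (!wait_not_busy(100))
		return -1;                /* mailbox wedged before we start */

	data_reg = val;
	reg_sel = reg;
	pkt = DPIO_BUSY;                  /* kick off the transaction */

	return wait_not_busy(100) ? 0 : -1;
}

int main(void)
{
	/* 0x800c is an arbitrary register number for the example. */
	printf("write %s\n", dpio_write(0x800c, 0x01000000) ? "failed" : "ok");
	return 0;
}
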
static void vlv_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -434,7 +503,7 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
* register is uninitialized.
*/
val = I915_READ(reg);
- if (!(val & ~LVDS_DETECTED))
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
dev_priv->lvds_val = val;
}
@@ -510,6 +579,13 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (IS_VALLEYVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
+ limit = &intel_limits_vlv_dac;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ limit = &intel_limits_vlv_hdmi;
+ else
+ limit = &intel_limits_vlv_dp;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
@@ -551,11 +627,10 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
- if (encoder->base.crtc == crtc && encoder->type == type)
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
return true;
return false;
@@ -783,6 +858,73 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 m, n, fastclk;
+ u32 updrate, minupdate, fracbits, p;
+ unsigned long bestppm, ppm, absppm;
+ int dotclk, flag = 0;
+
+ dotclk = target * 1000;
+ bestppm = 1000000;
+ ppm = absppm = 0;
+ fastclk = dotclk / (2*100);
+ updrate = 0;
+ minupdate = 19200;
+ fracbits = 1;
+ n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
+ bestm1 = bestm2 = bestp1 = bestp2 = 0;
+
+ /* based on hardware requirement, prefer smaller n over precision */
+ for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
+ updrate = refclk / n;
+ for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
+ for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
+ if (p2 > 10)
+ p2 = p2 - 1;
+ p = p1 * p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
+ m2 = (((2*(fastclk * p * n / m1)) +
+ refclk) / (2*refclk));
+ m = m1 * m2;
+ vco = updrate * m;
+ if (vco >= limit->vco.min && vco < limit->vco.max) {
+ ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
+ absppm = (ppm > 0) ? ppm : (-ppm);
+ if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
+ bestppm = 0;
+ flag = 1;
+ }
+ if (absppm < bestppm - 10) {
+ bestppm = absppm;
+ flag = 1;
+ }
+ if (flag) {
+ bestn = n;
+ bestm1 = m1;
+ bestm2 = m2;
+ bestp1 = p1;
+ bestp2 = p2;
+ flag = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ best_clock->n = bestn;
+ best_clock->m1 = bestm1;
+ best_clock->m2 = bestm2;
+ best_clock->p1 = bestp1;
+ best_clock->p2 = bestp2;
+
+ return true;
+}
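
Aside: a standalone reduction (not part of the patch) of the divider search above: walk the n/p/m1 space, derive m2 by rounding against the reference clock, and keep the candidate with the smallest clock error in parts per million. The loop limits and the example target are illustrative, not the real VLV tables.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long refclk = 27000, target = 148500;      /* kHz, example */
	long long fastclk = target * 1000 / (2 * 100);  /* same scaling as above */
	long long best_ppm = 1000000, bn = 0, bm1 = 0, bm2 = 0, bp = 0;

	for (long long n = 1; n <= 7; n++)
		for (long long p = 10; p <= 30; p++)
			for (long long m1 = 2; m1 <= 3; m1++) {
				/* round m2 so vco/p lands near fastclk */
				long long m2 = (2 * (fastclk * p * n / m1) + refclk) /
					       (2 * refclk);
				long long vco = refclk / n * m1 * m2;
				long long ppm = llabs(vco / p - fastclk) *
						1000000 / fastclk;
				if (ppm < best_ppm) {
					best_ppm = ppm;
					bn = n; bm1 = m1; bm2 = m2; bp = p;
				}
			}

	printf("n=%lld m1=%lld m2=%lld p=%lld ppm=%lld\n",
	       bn, bm1, bm2, bp, best_ppm);
	return 0;
}
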
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
@@ -1232,6 +1374,9 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH dp port still using transcoder B\n");
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
@@ -1241,6 +1386,9 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH hdmi port still using transcoder B\n");
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
@@ -1287,7 +1435,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* No really, not for ILK+ */
- BUG_ON(dev_priv->info->gen >= 5);
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
@@ -1344,7 +1492,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
unsigned long flags;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
@@ -1358,7 +1506,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
SBI_BUSY |
SBI_CTL_OP_CRWR);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
goto out_unlock;
@@ -1372,10 +1520,10 @@ static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
unsigned long flags;
- u32 value;
+ u32 value = 0;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
@@ -1387,7 +1535,7 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
SBI_BUSY |
SBI_CTL_OP_CRRD);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
goto out_unlock;
@@ -1824,6 +1972,22 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
i915_gem_object_unpin(obj);
}
+/* Computes the linear offset to the base tile and adjusts x, y. Bytes per
+ * pixel is assumed to be a power of two. */
+static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch)
+{
+ int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+ tiles = *x / (512/bpp);
+ *x %= 512/bpp;
+
+ return tile_rows * pitch * 8 + tiles * 4096;
+}
+
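Aside: the same X-tile arithmetic as a runnable example (not part of the patch), assuming the 512-byte-wide, 8-row, 4096-byte tiles the helper above encodes.

#include <stdio.h>

static unsigned long xtiled_offset(int *x, int *y, unsigned bpp,
				   unsigned pitch)
{
	int tile_rows = *y / 8, tiles;

	*y %= 8;                        /* residual lines within the tile */
	tiles = *x / (512 / bpp);
	*x %= 512 / bpp;                /* residual pixels within the tile */
	return tile_rows * (unsigned long)pitch * 8 + tiles * 4096ul;
}

int main(void)
{
	int x = 200, y = 20;            /* pixels into the framebuffer */
	unsigned long off = xtiled_offset(&x, &y, 4, 8192);

	/* y=20 -> 2 tile rows + 4 lines; x=200 at 4 bpp -> 1 tile + 72 px;
	 * offset = 2*8192*8 + 4096 = 135168 */
	printf("offset=%lu residual x=%d y=%d\n", off, x, y);
	return 0;
}
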
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
{
@@ -1833,7 +1997,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
+ unsigned long linear_offset;
u32 dspcntr;
u32 reg;
@@ -1880,18 +2044,28 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_crtc->dspaddr_offset =
+ gen4_compute_dspaddr_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+ } else {
+ intel_crtc->dspaddr_offset = linear_offset;
+ }
+
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), Start + Offset);
+ I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
POSTING_READ(reg);
return 0;
@@ -1906,7 +2080,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
+ unsigned long linear_offset;
u32 dspcntr;
u32 reg;
@@ -1961,15 +2135,20 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ intel_crtc->dspaddr_offset =
+ gen4_compute_dspaddr_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
POSTING_READ(reg);
return 0;
@@ -2656,16 +2835,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
@@ -3397,7 +3573,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
@@ -3554,16 +3730,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct intel_encoder *intel_encoder;
unsigned int display_bpc = UINT_MAX, bpc;
/* Walk the encoders & connectors on this crtc, get min bpc */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
unsigned int lvds_bpc;
@@ -3595,7 +3767,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
/* Not one of the known troublemakers, check the EDID */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- if (connector->encoder != encoder)
+ if (connector->encoder != &intel_encoder->base)
continue;
/* Don't use an invalid EDID bpc value */
@@ -3666,13 +3838,37 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
return display_bpc != bpc;
}
+static int vlv_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk = 27000; /* for DP & HDMI */
+
+ return 100000; /* only one validated so far */
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv))
+ refclk = 100000;
+ else
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ refclk = 100000;
+ }
+
+ return refclk;
+}
+
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (IS_VALLEYVIEW(dev)) {
+ refclk = vlv_get_refclk(crtc);
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
@@ -3787,6 +3983,72 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
I915_WRITE(LVDS, temp);
}
+static void vlv_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int refclk, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll, mdiv, pdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ bool is_hdmi;
+
+ is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ bestn = clock->n;
+ bestm1 = clock->m1;
+ bestm2 = clock->m2;
+ bestp1 = clock->p1;
+ bestp2 = clock->p2;
+
+ /* Enable DPIO clock input */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_POST_DIV_SHIFT);
+ mdiv |= (1 << DPIO_K_SHIFT);
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+ intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+
+ pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
+ (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+
+ if (is_hdmi) {
+ u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
+ }
+
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+}
+
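Aside: a minimal model (not part of the patch) of the enable-then-poll-for-lock sequence in vlv_update_pll(); the lock behaviour is simulated and the bit positions are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DPLL_VCO_ENABLE (1u << 31)
#define DPLL_LOCK       (1u << 15)    /* illustrative position */

static uint32_t dpll;                 /* simulated DPLL register */

static bool wait_for_lock(int max_polls)
{
	while (max_polls--) {
		if (dpll & DPLL_VCO_ENABLE)
			dpll |= DPLL_LOCK;    /* PLL locks; simulated */
		if (dpll & DPLL_LOCK)
			return true;
	}
	return false;
}

int main(void)
{
	/* The dividers would be programmed through DPIO before this. */
	dpll |= DPLL_VCO_ENABLE;
	if (!wait_for_lock(1))
		fprintf(stderr, "DPLL failed to lock\n");
	printf("dpll=0x%08x\n", dpll);
	return 0;
}
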
static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -3974,15 +4236,11 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
u32 dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4044,6 +4302,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (IS_GEN2(dev))
i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock, NULL,
+ refclk, num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4282,15 +4543,11 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4327,7 +4584,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder, *edp_encoder = NULL;
const intel_limit_t *limit;
int ret;
@@ -4338,10 +4594,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
bool dither;
bool is_cpu_edp = false, is_pch_edp = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4405,25 +4658,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
&clock,
&reduced_clock);
}
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (is_sdvo && is_tv) {
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 3;
- clock.m1 = 16;
- clock.m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 6;
- clock.m1 = 12;
- clock.m2 = 8;
- }
- }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
@@ -4431,16 +4669,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_cpu_edp) {
- target_clock = mode->clock;
intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
- /* [e]DP over FDI requires target mode clock
- instead of link clock */
- if (is_dp)
- target_clock = mode->clock;
- else
- target_clock = adjusted_mode->clock;
-
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
@@ -4451,6 +4681,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
+ /* [e]DP over FDI requires target mode clock instead of link clock. */
+ if (edp_encoder)
+ target_clock = intel_edp_target_clock(edp_encoder, mode);
+ else if (is_dp)
+ target_clock = mode->clock;
+ else
+ target_clock = adjusted_mode->clock;
+
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
@@ -4662,16 +4900,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
- }
}
}
@@ -5975,7 +6205,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long offset;
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -5984,9 +6213,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
if (ret)
goto err;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
@@ -6003,7 +6229,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_ring_advance(ring);
return 0;
@@ -6021,7 +6247,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long offset;
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -6030,9 +6255,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
if (ret)
goto err;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
@@ -6046,7 +6268,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -6084,7 +6306,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring,
+ (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -6124,7 +6348,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -6187,7 +6411,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
return 0;
@@ -6219,6 +6443,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int ret;
+ /* Can't change pixel format via MI display flips. */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ /*
+ * TILEOFF/LINOFF registers can't be changed via MI display flips.
+ * Note that pitch changes could also affect these registers.
+ */
+ if (INTEL_INFO(dev)->gen > 3 &&
+ (fb->offsets[0] != crtc->fb->offsets[0] ||
+ fb->pitches[0] != crtc->fb->pitches[0]))
+ return -EINVAL;
+
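Aside: the two rejection rules above as a standalone predicate (not part of the patch), with a hypothetical framebuffer struct. MI display flips only swap the surface base address, so any difference the flip cannot express must fail the ioctl.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fb {
	uint32_t pixel_format;
	uint32_t offset0;     /* fb->offsets[0] */
	uint32_t pitch0;      /* fb->pitches[0] */
};

static bool can_mi_flip(const struct fb *old, const struct fb *new_, int gen)
{
	/* Pixel format can never change across an MI flip. */
	if (new_->pixel_format != old->pixel_format)
		return false;
	/* Gen4+ TILEOFF/LINOFF cannot be updated by the flip either. */
	if (gen > 3 && (new_->offset0 != old->offset0 ||
			new_->pitch0 != old->pitch0))
		return false;
	return true;
}

int main(void)
{
	struct fb a = { 1, 0, 4096 }, b = { 1, 0, 8192 };

	printf("flip ok: %d\n", can_mi_flip(&a, &b, 4));  /* 0: pitch differs */
	return 0;
}
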
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -6249,7 +6486,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6284,6 +6523,7 @@ cleanup_pending:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
+cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -6566,7 +6806,24 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+ int found;
+ if (I915_READ(SDVOB) & PORT_DETECTED) {
+ /* SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, SDVOB, true);
+ if (!found)
+ intel_hdmi_init(dev, SDVOB);
+ if (!found && (I915_READ(DP_B) & DP_DETECTED))
+ intel_dp_init(dev, DP_B);
+ }
+
+ if (I915_READ(SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, SDVOC);
+
+ /* Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -6623,7 +6880,7 @@ static void intel_setup_outputs(struct drm_device *dev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
}
@@ -6777,9 +7034,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.force_wake_get = vlv_force_wake_get;
- dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
@@ -6923,20 +7177,18 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /* We attempt to init the necessary power wells early during
+ * initialization, so the subsystems that expect power to be enabled
+ * can work.
+ */
+ intel_init_power_wells(dev);
- intel_init_clock_gating(dev);
+ intel_prepare_ddi(dev);
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
- intel_init_emon(dev);
- }
+ intel_init_clock_gating(dev);
- if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
+ mutex_lock(&dev->struct_mutex);
+ intel_enable_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
}
void intel_modeset_init(struct drm_device *dev)
@@ -6958,8 +7210,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev);
- intel_prepare_ddi(dev);
-
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -6972,7 +7222,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
- dev->mode_config.fb_base = dev->agp->base;
+ dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -7025,13 +7275,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_drps(dev);
- if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
- gen6_disable_rps(dev);
+ intel_disable_gt_powersave(dev);
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c0449324143..0a56b9ab0f5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -155,6 +155,18 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
*link_bw = 270000;
}
+int
+intel_edp_target_clock(struct intel_encoder *intel_encoder,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+
+ if (intel_dp->panel_fixed_mode)
+ return intel_dp->panel_fixed_mode->clock;
+ else
+ return mode->clock;
+}
+
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
@@ -225,7 +237,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -239,8 +251,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
if (mode_rate > max_rate)
return false;
- if (adjusted_mode)
- adjusted_mode->private_flags
+ if (adjust_mode)
+ mode->private_flags
|= INTEL_MODE_DP_FORCE_6BPC;
return true;
@@ -263,7 +275,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+ if (!intel_dp_adjust_dithering(intel_dp, mode, false))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -691,7 +703,8 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
}
static bool
-intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@@ -706,28 +719,23 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
- /*
- * the mode->clock is used to calculate the Data&Link M/N
- * of the pipe. For the eDP the fixed clock should be used.
- */
- mode->clock = intel_dp->panel_fixed_mode->clock;
}
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock], mode->clock);
+ max_lane_count, bws[max_clock], adjusted_mode->clock);
- if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+ if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
- mode_rate = intel_dp_link_required(mode->clock, bpp);
+ mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = 0; clock <= max_clock; clock++) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (mode_rate <= link_avail) {
@@ -786,8 +794,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@@ -797,13 +804,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
-
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
{
@@ -1768,7 +1771,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count) {
+ if (i == intel_dp->lane_count && voltage_tries == 5) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
@@ -1922,7 +1925,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_OFF;
}
- if (!HAS_PCH_CPT(dev) &&
+ if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2099,25 +2102,23 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp, bit;
+ uint32_t bit;
switch (intel_dp->output_reg) {
case DP_B:
- bit = DPB_HOTPLUG_INT_STATUS;
+ bit = DPB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
- bit = DPC_HOTPLUG_INT_STATUS;
+ bit = DPC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
- bit = DPD_HOTPLUG_INT_STATUS;
+ bit = DPD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
}
- temp = I915_READ(PORT_HOTPLUG_STAT);
-
- if ((temp & bit) == 0)
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
@@ -2399,16 +2400,11 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ struct intel_encoder *encoder;
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
@@ -2520,19 +2516,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
case DP_B:
case PCH_DP_B:
dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS;
+ DPB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
dev_priv->hotplug_supported_mask |=
- HDMIC_HOTPLUG_INT_STATUS;
+ DPC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
dev_priv->hotplug_supported_mask |=
- HDMID_HOTPLUG_INT_STATUS;
+ DPD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3e0918834e7..84353559441 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -169,6 +169,7 @@ struct intel_crtc {
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
bool active; /* is the crtc on? independent of the dpms mode */
+ bool primary_disabled; /* is the crtc obscured by a plane? */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
@@ -176,6 +177,11 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+ /* Display surface base address adjustment for pageflips. Note that on
+ * gen4+ this only adjusts up to a tile; offsets within a tile are
+ * handled in the hw itself (with the TILEOFF register). */
+ unsigned long dspaddr_offset;
+
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
@@ -191,7 +197,6 @@ struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
- bool primary_disabled;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -301,6 +306,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
};
static inline struct drm_crtc *
@@ -335,7 +342,6 @@ struct intel_fbc_work {
};
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
@@ -343,9 +349,6 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode);
-extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -360,6 +363,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+extern int intel_edp_target_clock(struct intel_encoder *,
+ struct drm_display_mode *mode);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
@@ -372,13 +377,14 @@ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
-extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
@@ -423,9 +429,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void ironlake_disable_rc6(struct drm_device *dev);
-extern void ironlake_enable_drps(struct drm_device *dev);
-extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -492,10 +495,11 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
+extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 60ba50b956f..36c542e5036 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -136,7 +136,7 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index bf8690720a0..97f673523b9 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -140,7 +140,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+ info->screen_base =
+ ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ead3bf7c21..98f602427eb 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,6 +37,19 @@
#include "i915_drm.h"
#include "i915_drv.h"
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t enabled_bits;
+
+ enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
+}
+
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
@@ -121,36 +134,31 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- val &= ~VIDEO_DIP_PORT_MASK;
- if (intel_hdmi->sdvox_reg == SDVOB)
- val |= VIDEO_DIP_PORT_B;
- else if (intel_hdmi->sdvox_reg == SDVOC)
- val |= VIDEO_DIP_PORT_C;
- else
- return;
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(VIDEO_DIP_CTL, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(VIDEO_DIP_CTL, val);
+ POSTING_READ(VIDEO_DIP_CTL);
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -160,46 +168,32 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- val &= ~VIDEO_DIP_PORT_MASK;
- switch (intel_hdmi->sdvox_reg) {
- case HDMIB:
- val |= VIDEO_DIP_PORT_B;
- break;
- case HDMIC:
- val |= VIDEO_DIP_PORT_C;
- break;
- case HDMID:
- val |= VIDEO_DIP_PORT_D;
- break;
- default:
- return;
- }
-
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -213,32 +207,31 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
- if (frame->type == DIP_TYPE_AVI)
- val |= VIDEO_DIP_ENABLE_AVI;
- else
+ if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
-
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -252,26 +245,28 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -289,18 +284,19 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
if (data_reg == 0)
return;
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
val &= ~hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
data++;
}
+ mmiowb();
val |= hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
+ POSTING_READ(ctl_reg);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
@@ -308,14 +304,11 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- if (!intel_hdmi->has_hdmi_sink)
- return;
-
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
}
-void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
@@ -330,7 +323,7 @@ void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_set_infoframe(encoder, &avi_if);
}
-void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -345,6 +338,223 @@ void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
intel_set_infoframe(encoder, &spd_if);
}
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VIDEO_DIP_CTL;
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* If the registers were not initialized yet, they might be zeroes,
+ * which means we're selecting the AVI DIP and setting its frequency
+ * to "once". This seems to really confuse the HW and make things
+ * stop working (the register spec says the AVI always needs to be
+ * sent every VSync). So here we avoid writing to the register more
+ * than we need, and also explicitly select the AVI DIP and explicitly
+ * set its frequency to every VSync. Avoiding writing it twice seems
+ * to be enough to solve the problem, but being defensive shouldn't
+ * hurt us either. */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case SDVOC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ default:
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
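Aside: a toy model (not part of the patch) of the port-switch rule encoded above: the DIP engine must be disabled before its port-select field is changed, then re-enabled. The field widths are illustrative.

#include <stdint.h>
#include <stdio.h>

#define DIP_ENABLE    (1u << 31)
#define DIP_PORT_MASK (3u << 29)

static uint32_t dip_ctl;              /* simulated VIDEO_DIP_CTL */

static void select_port(uint32_t port)
{
	uint32_t val = dip_ctl;

	if (port != (val & DIP_PORT_MASK)) {
		if (val & DIP_ENABLE) {
			val &= ~DIP_ENABLE;   /* disable before switching */
			dip_ctl = val;        /* would be a posted write */
		}
		val &= ~DIP_PORT_MASK;
		val |= port;
	}

	val |= DIP_ENABLE;
	dip_ctl = val;
}

int main(void)
{
	select_port(1u << 29);
	select_port(2u << 29);        /* forces the disable/re-enable dance */
	printf("dip_ctl=0x%08x\n", dip_ctl);
	return 0;
}
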
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ port = VIDEO_DIP_PORT_D;
+ break;
+ default:
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ I915_WRITE(reg, 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
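
Apart from the Haswell variant, which simply zeroes its control register, each of the *_set_infoframes() hooks above opens with the same DVI guard. Condensed here for reference, copied from the pattern in the patch rather than new code:

	if (!intel_hdmi->has_hdmi_sink) {
		if (!(val & VIDEO_DIP_ENABLE))
			return;			/* nothing enabled, nothing to tear down */
		val &= ~VIDEO_DIP_ENABLE;	/* DVI sinks must not receive infoframes */
		I915_WRITE(reg, val);
		POSTING_READ(reg);
		return;
	}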
+
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -355,7 +565,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
- sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ sdvox = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -382,14 +592,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 1)
+ else if (intel_crtc->pipe == PIPE_B)
sdvox |= SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -405,6 +614,36 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp = I915_READ(intel_hdmi->sdvox_reg);
+ /* HW workaround for IBX: we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only take
+ * effect on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ } else {
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
+ }
+ }
+
/* HW workaround: the enable bit needs to be toggled off and on for
* 12bpc, but we do it unconditionally since that proved more stable in
* testing.
*/
@@ -446,12 +685,33 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
}
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
+static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit;
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ bit = HDMIB_HOTPLUG_LIVE_STATUS;
+ break;
+ case SDVOC:
+ bit = HDMIC_HOTPLUG_LIVE_STATUS;
+ break;
+ default:
+ bit = 0;
+ break;
+ }
+
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
@@ -460,6 +720,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
+ if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
+ return status;
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
@@ -633,7 +896,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
- int i;
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
@@ -710,26 +972,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
- I915_WRITE(VIDEO_DIP_CTL, 0);
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
- /* FIXME: Haswell has a new set of DIP frame registers, but we are
- * just doing the minimal required for HDMI to work at this stage.
- */
intel_hdmi->write_infoframe = hsw_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = ibx_set_infoframes;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = cpt_set_infoframes;
}
if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 08eb04c787e..e05c0d3e344 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -71,6 +71,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -107,7 +108,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
- intel_panel_enable_backlight(dev);
+ intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -228,14 +229,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
}
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct drm_encoder *tmp_encoder;
+ struct intel_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -246,8 +247,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* Should never happen!! */
- list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
- if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
+ for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
+ if (&tmp_encoder->base != encoder) {
DRM_ERROR("Can't enable LVDS and another "
"encoder on the same pipe\n");
return false;
@@ -408,13 +409,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /*
- * Prior to Ironlake, we must disable the pipe if we want to adjust
- * the panel fitter. However at all other times we can just reset
- * the registers regardless.
- */
- if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
- intel_lvds_disable(intel_lvds);
+ intel_lvds_disable(intel_lvds);
}
static void intel_lvds_commit(struct drm_encoder *encoder)
@@ -777,6 +772,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "ZOTAC ZBOXSD-ID12/ID13",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
+ DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+ },
+ },
{ } /* terminating entry */
};
@@ -967,6 +970,8 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else if (IS_GEN4(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -1074,35 +1079,14 @@ bool intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
if (HAS_PCH_SPLIT(dev)) {
- u32 pwm;
-
- pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
-
- /* make sure PWM is enabled and locked to the LVDS pipe */
- pwm = I915_READ(BLC_PWM_CPU_CTL2);
- if (pipe == 0 && (pwm & PWM_PIPE_B))
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
- if (pipe)
- pwm |= PWM_PIPE_B;
- else
- pwm &= ~PWM_PIPE_B;
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
-
- pwm = I915_READ(BLC_PWM_PCH_CTL1);
- pwm |= PWM_PCH_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
- /*
- * Unlock registers and just
- * leave them unlocked
- */
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
- /*
- * Unlock registers and just
- * leave them unlocked
- */
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d67ec3a51e4..45848b9b670 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -33,34 +33,6 @@
#include "i915_drv.h"
/**
- * intel_ddc_probe
- *
- */
-bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
-{
- struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
- msgs, 2) == 2;
-}
-
-/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: i2c adapter
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 458743da377..830d0dd610e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -226,7 +226,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(ring, overlay->last_flip_req);
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -452,7 +452,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(ring, overlay->last_flip_req);
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 2a1625d84a6..10c7d39034e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -56,7 +56,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -287,9 +287,24 @@ void intel_panel_disable_backlight(struct drm_device *dev)
dev_priv->backlight_enabled = false;
intel_panel_actually_set_backlight(dev, 0);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
}
-void intel_panel_enable_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -298,6 +313,40 @@ void intel_panel_enable_backlight(struct drm_device *dev)
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ tmp = I915_READ(reg);
+
+ /* Note that this can also get called through dpms changes. Since
+ * we don't track the backlight dpms state, check first whether
+ * there is anything to do. */
+ if (tmp & BLM_PWM_ENABLE)
+ return;
+
+ if (dev_priv->num_pipe == 3)
+ tmp &= ~BLM_PIPE_SELECT_IVB;
+ else
+ tmp &= ~BLM_PIPE_SELECT;
+
+ tmp |= BLM_PIPE(pipe);
+ tmp &= ~BLM_PWM_ENABLE;
+
+ I915_WRITE(reg, tmp);
+ POSTING_READ(reg);
+ I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_PWM_ENABLE;
+ tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
}
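
Note the ordering in the enable path above: the pipe-select bits are programmed while BLM_PWM_ENABLE is still clear, the write is flushed with a posting read, and only then is the enable bit set, so the PWM never runs against a half-programmed configuration. Condensed:

	tmp &= ~BLM_PWM_ENABLE;		/* configure while the PWM is off */
	tmp |= BLM_PIPE(pipe);
	I915_WRITE(reg, tmp);
	POSTING_READ(reg);		/* ensure the configuration has landed */
	I915_WRITE(reg, tmp | BLM_PWM_ENABLE);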
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d0ce2a5b1d3..94aabcaa3a6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -387,8 +387,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int enable_fbc;
- DRM_DEBUG_KMS("\n");
-
if (!i915_powersave)
return;
@@ -405,7 +403,9 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (tmp_crtc->enabled &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled &&
+ tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -2182,7 +2182,7 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
@@ -2246,7 +2246,7 @@ void ironlake_enable_drps(struct drm_device *dev)
getrawmonotonic(&dev_priv->last_time2);
}
-void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl = I915_READ16(MEMSWCTL);
@@ -2299,10 +2299,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
dev_priv->cur_delay = val;
}
-void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
@@ -2332,9 +2333,11 @@ int intel_enable_rc6(const struct drm_device *dev)
if (INTEL_INFO(dev)->gen == 5)
return 0;
- /* Sorry Haswell, no RC6 for you for now. */
+ /* On Haswell, only RC6 is available. So let's enable it by default to
+ * provide better testing and coverage from the beginning.
+ */
if (IS_HASWELL(dev))
- return 0;
+ return INTEL_RC6_ENABLE;
/*
* Disable rc6 on Sandybridge
@@ -2347,8 +2350,9 @@ int intel_enable_rc6(const struct drm_device *dev)
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rps(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
@@ -2357,6 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
int rc6_mode;
int i;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
*
@@ -2364,7 +2370,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- mutex_lock(&dev_priv->dev->struct_mutex);
/* Clear the DBG now so we don't confuse earlier errors */
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
@@ -2400,20 +2405,24 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+ /* Check if we are enabling RC6 */
rc6_mode = intel_enable_rc6(dev_priv->dev);
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ /* RC6p and RC6pp are not used on Haswell */
+ if (!IS_HASWELL(dev)) {
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ }
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+ (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -2431,10 +2440,19 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->max_delay << 24 |
dev_priv->min_delay << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+ } else {
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ }
+
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -2442,7 +2460,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
+ (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -2473,14 +2491,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
/* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
+ I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
spin_lock_irq(&dev_priv->rps_lock);
WARN_ON(dev_priv->pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
@@ -2489,15 +2500,17 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, 0);
gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
}
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
int gpu_freq, ia_freq, max_ia_freq;
int scaling_factor = 180;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
max_ia_freq = cpufreq_quick_get_max(0);
/*
* Default to measured freq if none found, PCU will ensure we don't go
@@ -2509,8 +2522,6 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
- mutex_lock(&dev_priv->dev->struct_mutex);
-
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
@@ -2541,11 +2552,9 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
continue;
}
}
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void ironlake_teardown_rc6(struct drm_device *dev)
+void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2562,7 +2571,7 @@ static void ironlake_teardown_rc6(struct drm_device *dev)
}
}
-void ironlake_disable_rc6(struct drm_device *dev)
+static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2578,8 +2587,6 @@ void ironlake_disable_rc6(struct drm_device *dev)
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
}
-
- ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
@@ -2601,7 +2608,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
return 0;
}
-void ironlake_enable_rc6(struct drm_device *dev)
+static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
@@ -2613,12 +2620,11 @@ void ironlake_enable_rc6(struct drm_device *dev)
if (!intel_enable_rc6(dev))
return;
- mutex_lock(&dev->struct_mutex);
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
ret = ironlake_setup_rc6(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return;
- }
/*
* GPU can automatically power down the render unit if given a page
@@ -2627,7 +2633,6 @@ void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
return;
}
@@ -2652,13 +2657,11 @@ void ironlake_enable_rc6(struct drm_device *dev)
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
return;
}
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- mutex_unlock(&dev->struct_mutex);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -3154,8 +3157,7 @@ void intel_gpu_ips_teardown(void)
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
}
-
-void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
@@ -3226,6 +3228,28 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_drps(dev);
+ ironlake_disable_rc6(dev);
+ } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ gen6_disable_rps(dev);
+ }
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ }
+}
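
The consolidated enable path assumes the caller already holds dev->struct_mutex; the internal mutex_lock()/mutex_unlock() calls were removed above in favour of WARN_ON(!mutex_is_locked(...)) assertions in gen6_enable_rps(), gen6_update_ring_freq() and ironlake_enable_rc6(). A hedged sketch of a call site under that assumption:

	mutex_lock(&dev->struct_mutex);
	intel_enable_gt_powersave(dev);	/* picks DRPS/RC6 or RPS/ring-freq per platform */
	mutex_unlock(&dev->struct_mutex);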
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3328,8 +3352,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we haven't debugged actual testcases to confirm it.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
*/
I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -3357,6 +3385,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -3377,7 +3408,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
@@ -3427,13 +3458,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
/* WaDisable4x2SubspanOptimization */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /* XXX: This is a workaround for early silicon revisions and should be
+ * removed later.
+ */
+ I915_WRITE(WM_DBG,
+ I915_READ(WM_DBG) |
+ WM_DBG_DISALLOW_MULTIPLE_LP |
+ WM_DBG_DISALLOW_SPRITE |
+ WM_DBG_DISALLOW_MAXFIFO);
}
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t snpcr;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
@@ -3441,10 +3483,77 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we haven't debugged actual testcases to confirm it.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
*/
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= GEN6_MBC_SNPCR_MED;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
@@ -3465,6 +3574,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we haven't debugged actual testcases to confirm it.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -3474,6 +3612,19 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * On ValleyView, the GUnit needs to signal the GT
+ * when flip and other events complete. So enable
+ * all the GUnit->GT interrupts here
+ */
+ I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
+ PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
+ SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
+ PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
+ PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
+ SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
+ PLANEA_FLIPDONE_INT_EN);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3681,34 +3832,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
@@ -3756,7 +3879,7 @@ void intel_init_pm(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else
dev_priv->display.update_wm = NULL;
@@ -3764,8 +3887,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
- dev_priv->display.force_wake_get = vlv_force_wake_get;
- dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
@@ -3811,10 +3932,196 @@ void intel_init_pm(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
- /* We attempt to init the necessary power wells early in the initialization
- * time, so the subsystems that expect power to be enabled can work.
+ /* Workaround for a sporadic read returning 0: wait for the GT
+ * thread to wake up.
*/
- intel_init_power_wells(dev);
+ if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_ACK;
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (--dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
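
As the comment above gen6_gt_force_wake_get() says, a sequence that must not see the GT power down brackets its register accesses with the get/put pair; because dev_priv->forcewake_count is reference counted under gt_lock, such sections nest safely. A minimal sketch:

	gen6_gt_force_wake_get(dev_priv);
	/* ... a series of I915_READ()/I915_WRITE() accesses that must all
	 * observe an awake GT ... */
	gen6_gt_force_wake_put(dev_priv);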
+
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->gt_fifo_count = fifo;
+ }
+ dev_priv->gt_fifo_count--;
+
+ return ret;
+}
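
__gen6_gt_wait_for_fifo() keeps a cached count of free GT FIFO entries and only polls the hardware, at 10 us per iteration for at most 500 iterations (about 5 ms), once the cache falls to the reserved watermark; its return value counts timeouts rather than failing hard. A caller-side sketch, where reg and val are hypothetical placeholders and the error handling is illustrative:

	/* Each tracked MMIO write consumes one FIFO entry; top up first. */
	if (__gen6_gt_wait_for_fifo(dev_priv))
		DRM_DEBUG("GT FIFO poll timed out; write may be dropped\n");
	I915_WRITE_NOTRACE(reg, val);	/* reg/val stand in for a real register write */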
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ /* Already awake? */
+ if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+ return;
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+ POSTING_READ(FORCEWAKE_VLV);
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+ /* FIXME: confirm VLV behavior with Punit folks */
+ POSTING_READ(FORCEWAKE_VLV);
+}
+
+void intel_gt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_init(&dev_priv->gt_lock);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->gt.force_wake_get = vlv_force_wake_get;
+ dev_priv->gt.force_wake_put = vlv_force_wake_put;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b84ff89ca..bf0195a96d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -219,19 +219,28 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- intel_emit_post_sync_nonzero_flush(ring);
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed (but only if the caller actually wants that).
+ */
+ if (flush_domains)
+ flags |= PIPE_CONTROL_CS_STALL;
ret = intel_ring_begin(ring, 6);
if (ret)
@@ -433,11 +442,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ /* This is not explicitly set for GEN6, so read the register.
+ * See intel_ring_mi_set_context() for why we care.
+ * TODO: consider explicitly setting the bit for GEN5
+ */
+ ring->itlb_before_ctx_switch =
+ !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
}
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
return ret;
}
@@ -825,7 +844,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+ GEN6_RENDER_L3_PARITY_ERROR));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -844,7 +867,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~0);
+ if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+ else
+ I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -946,6 +972,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
+ ret = -ENOMEM;
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -969,6 +996,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ring->dev = dev;
@@ -1002,8 +1030,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unpin;
- ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
- ring->size);
+ ring->virtual_start =
+ ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+ ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
@@ -1089,20 +1118,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- bool was_interruptible;
int ret;
- /* XXX As we have not yet audited all the paths to check that
- * they are ready for ERESTARTSYS from intel_ring_begin, do not
- * allow us to be interruptible by a signal.
- */
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- ret = i915_wait_request(ring, seqno);
-
- dev_priv->mm.interruptible = was_interruptible;
+ ret = i915_wait_seqno(ring, seqno);
if (!ret)
i915_gem_retire_requests_ring(ring);
@@ -1200,8 +1218,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
}
msleep(1);
- if (atomic_read(&dev_priv->mm.wedged))
- return -EAGAIN;
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end(ring);
return -EBUSY;
@@ -1210,12 +1230,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
- if (unlikely(atomic_read(&dev_priv->mm.wedged)))
- return -EIO;
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
@@ -1250,20 +1271,31 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
- I915_WRITE(GEN6_BSD_RNCID, 0x0);
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
- 50))
- DRM_ERROR("timed out waiting for IDLE Indicator\n");
+ GEN6_BSD_SLEEP_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+ /* Now that the ring is fully powered up, update the tail */
I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 55d3da26bae..1d3c81fdad9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -113,9 +113,17 @@ struct intel_ring_buffer {
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
+ bool gpu_caches_dirty;
wait_queue_head_t irq_queue;
+ /**
+ * Do an explicit TLB flush before MI_SET_CONTEXT
+ */
+ bool itlb_before_ctx_switch;
+ struct i915_hw_context *default_context;
+ struct drm_i915_gem_object *last_context_obj;
+
void *private;
};
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index b6a9d45fc3c..26a6a4d0d07 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -140,9 +140,6 @@ struct intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
-
- /* Input timings for adjusted_mode */
- struct intel_sdvo_dtd input_dtd;
};
struct intel_sdvo_connector {
@@ -938,7 +935,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
static bool
intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_sdvo_dtd output_dtd;
@@ -953,11 +950,15 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
return true;
}
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
static bool
-intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+ struct intel_sdvo_dtd input_dtd;
+
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
@@ -969,16 +970,16 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &intel_sdvo->input_dtd))
+ &input_dtd))
return false;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
return true;
}
static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
@@ -993,17 +994,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1057,7 +1058,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo->sdvo_lvds_fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ DRM_INFO("Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1079,7 +1082,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ DRM_INFO("Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
switch (pixel_multiplier) {
default:
@@ -1376,7 +1381,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
- mdelay(30);
+ msleep(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
@@ -2521,6 +2526,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
+ u32 hotplug_mask;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
@@ -2552,10 +2558,18 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
}
- if (intel_sdvo->is_sdvob)
- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- else
- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+ hotplug_mask = 0;
+ if (IS_G4X(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
+ } else if (IS_GEN4(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
+ } else {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
+ }
+ dev_priv->hotplug_supported_mask |= hotplug_mask;
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 2a20fb0781d..cc8df4de2d9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -56,6 +56,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl &= ~SPRITE_PIXFORMAT_MASK;
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SPRITE_TILED;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -84,7 +85,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= DVS_FORMAT_RGBX888;
+ sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
}
@@ -233,6 +234,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr &= ~DVS_PIXFORMAT_MASK;
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+ dvscntr &= ~DVS_TILED;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -326,6 +328,12 @@ intel_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
+ if (!intel_crtc->primary_disabled)
+ return;
+
+ intel_crtc->primary_disabled = false;
+ intel_update_fbc(dev);
+
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
}
@@ -337,7 +345,13 @@ intel_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
+ if (intel_crtc->primary_disabled)
+ return;
+
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+
+ intel_crtc->primary_disabled = true;
+ intel_update_fbc(dev);
}
static int
@@ -485,18 +499,14 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
*/
- if (!disable_primary && intel_plane->primary_disabled) {
+ if (!disable_primary)
intel_enable_primary(crtc);
- intel_plane->primary_disabled = false;
- }
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
- if (disable_primary) {
+ if (disable_primary)
intel_disable_primary(crtc);
- intel_plane->primary_disabled = true;
- }
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
@@ -527,11 +537,8 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
- if (intel_plane->primary_disabled) {
+ if (plane->crtc)
intel_enable_primary(plane->crtc);
- intel_plane->primary_disabled = false;
- }
-
intel_plane->disable_plane(plane);
if (!intel_plane->obj)
@@ -685,6 +692,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
default:
+ kfree(intel_plane);
return -ENODEV;
}
@@ -699,4 +707,3 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
return ret;
}
-
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index a233a51fd7e..befce6c4970 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -891,24 +891,21 @@ intel_tv_mode_valid(struct drm_connector *connector,
static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+intel_tv_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_mode_config *drm_config = &dev->mode_config;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- struct drm_encoder *other_encoder;
+ struct intel_encoder *other_encoder;
if (!tv_mode)
return false;
- /* FIXME: lock encoder list */
- list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
- if (other_encoder != encoder &&
- other_encoder->crtc == encoder->crtc)
+ for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
+ if (&other_encoder->base != encoder)
return false;
- }
adjusted_mode->clock = tv_mode->clock;
return true;
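The intel_tv.c hunk replaces an open-coded walk of the device-global encoder list (flagged with a FIXME about missing locking) with the for_each_encoder_on_crtc() helper, which visits only encoders already bound to the given CRTC. A sketch of the cloning check this implements, assuming the helper is a list_for_each_entry-style macro over struct intel_encoder:

struct intel_encoder *other_encoder;

/* TV-out cannot be cloned: refuse the mode if anything shares our pipe */
for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
	if (&other_encoder->base != encoder)
		return false;

Scoping the walk to the CRTC also drops the manual crtc comparison the old loop needed.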
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index f9a925d5881..b1bb46de3f5 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -75,7 +75,6 @@ static struct drm_driver driver = {
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
.irq_handler = mga_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = &mga_driver_fops,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 93e832d6c32..ea1024d7997 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -47,6 +47,9 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d303061b251..a4d7c500c97 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -78,8 +78,8 @@ static inline void mga_wait_busy(struct mga_device *mdev)
* to just pass that straight through, so this does nothing
*/
static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
@@ -1322,8 +1322,8 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
* to handle any encoder-specific limitations
*/
static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index fe5267d06ab..1cece6a78f3 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
- nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+ nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
@@ -12,6 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
+ nouveau_abi16.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
new file mode 100644
index 00000000000..ff23d88880e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_abi16.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+int
+nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_getparam *getparam = data;
+
+ switch (getparam->param) {
+ case NOUVEAU_GETPARAM_CHIPSET_ID:
+ getparam->value = dev_priv->chipset;
+ break;
+ case NOUVEAU_GETPARAM_PCI_VENDOR:
+ getparam->value = dev->pci_vendor;
+ break;
+ case NOUVEAU_GETPARAM_PCI_DEVICE:
+ getparam->value = dev->pci_device;
+ break;
+ case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (drm_pci_device_is_agp(dev))
+ getparam->value = 0;
+ else
+ if (!pci_is_pcie(dev->pdev))
+ getparam->value = 1;
+ else
+ getparam->value = 2;
+ break;
+ case NOUVEAU_GETPARAM_FB_SIZE:
+ getparam->value = dev_priv->fb_available_size;
+ break;
+ case NOUVEAU_GETPARAM_AGP_SIZE:
+ getparam->value = dev_priv->gart_info.aper_size;
+ break;
+ case NOUVEAU_GETPARAM_VM_VRAM_BASE:
+ getparam->value = 0; /* deprecated */
+ break;
+ case NOUVEAU_GETPARAM_PTIMER_TIME:
+ getparam->value = dev_priv->engine.timer.read(dev);
+ break;
+ case NOUVEAU_GETPARAM_HAS_BO_USAGE:
+ getparam->value = 1;
+ break;
+ case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+ getparam->value = 1;
+ break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+ * family anyway... */
+ if (dev_priv->chipset >= 0x40) {
+ getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ break;
+ }
+ /* FALLTHRU */
+ default:
+ NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
+{
+ return -EINVAL;
+}
+
+int
+nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_channel_alloc *init = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ if (!dev_priv->eng[NVOBJ_ENGINE_GR])
+ return -ENODEV;
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return -EINVAL;
+
+ ret = nouveau_channel_alloc(dev, &chan, file_priv,
+ init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle);
+ if (ret)
+ return ret;
+ init->channel = chan->id;
+
+ if (nouveau_vram_pushbuf == 0) {
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+ } else {
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ }
+
+ if (dev_priv->card_type < NV_C0) {
+ init->subchan[0].handle = 0x00000000;
+ init->subchan[0].grclass = 0x0000;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
+ }
+
+ /* Named memory object area */
+ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+ &init->notifier_handle);
+
+ if (ret == 0)
+ atomic_inc(&chan->users); /* userspace reference */
+ nouveau_channel_put(&chan);
+ return ret;
+}
+
+int
+nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_channel_free *req = data;
+ struct nouveau_channel *chan;
+
+ chan = nouveau_channel_get(file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ list_del(&chan->list);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_grobj_alloc *init = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ if (init->handle == ~0)
+ return -EINVAL;
+
+ /* compatibility with userspace that assumes 506e for all chipsets */
+ if (init->class == 0x506e) {
+ init->class = nouveau_software_class(dev);
+ if (init->class == 0x906e)
+ return 0;
+ } else
+ if (init->class == 0x906e) {
+ NV_ERROR(dev, "906e not supported yet\n");
+ return -EINVAL;
+ }
+
+ chan = nouveau_channel_get(file_priv, init->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ if (nouveau_ramht_find(chan, init->handle)) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
+ if (ret) {
+ NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ }
+
+out:
+ nouveau_channel_put(&chan);
+ return ret;
+}
+
+int
+nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_notifierobj_alloc *na = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ /* completely unnecessary for these chipsets... */
+ if (unlikely(dev_priv->card_type >= NV_C0))
+ return -EINVAL;
+
+ chan = nouveau_channel_get(file_priv, na->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+ &na->offset);
+ nouveau_channel_put(&chan);
+ return ret;
+}
+
+int
+nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_gpuobj_free *objfree = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = nouveau_channel_get(file_priv, objfree->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ /* Synchronize with the user channel */
+ nouveau_channel_idle(chan);
+
+ ret = nouveau_ramht_remove(chan, objfree->handle);
+ nouveau_channel_put(&chan);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
new file mode 100644
index 00000000000..e6328b008a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -0,0 +1,83 @@
+#ifndef __NOUVEAU_ABI16_H__
+#define __NOUVEAU_ABI16_H__
+
+#define ABI16_IOCTL_ARGS \
+ struct drm_device *dev, void *data, struct drm_file *file_priv
+int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
+
+struct drm_nouveau_channel_alloc {
+ uint32_t fb_ctxdma_handle;
+ uint32_t tt_ctxdma_handle;
+
+ int channel;
+ uint32_t pushbuf_domains;
+
+ /* Notifier memory */
+ uint32_t notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ uint32_t handle;
+ uint32_t grclass;
+ } subchan[8];
+ uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+ int channel;
+ uint32_t handle;
+ int class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+ uint32_t channel;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
+
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+
+#endif
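The header above freezes the old ABI: ioctl numbers, argument structs, and parameter IDs now live with the compat layer. A hedged sketch of how a userspace client consumes NOUVEAU_GETPARAM through these definitions (the error handling is illustrative, and the struct layout mirrors the one above):

#include <stdint.h>
#include <sys/ioctl.h>
#include <xf86drm.h>	/* drmIoctl(); DRM_IOCTL_NOUVEAU_GETPARAM comes
			 * from libdrm's nouveau_drm.h, assumed available */

struct nouveau_getparam {	/* same layout as drm_nouveau_getparam */
	uint64_t param;
	uint64_t value;
};

static int get_bus_type(int fd, uint64_t *bus)
{
	struct nouveau_getparam gp = { .param = 5 /* NOUVEAU_GETPARAM_BUS_TYPE */ };

	if (drmIoctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
		return -1;
	*bus = gp.value;	/* 0 = AGP, 1 = PCI, 2 = PCIe */
	return 0;
}

Note the new kernel copy of getparam (nouveau_abi16.c above) returns these raw numbers directly rather than the driver-internal NV_AGP/NV_PCI/NV_PCIE constants used by the copy being deleted from nouveau_state.c further down; the numeric values match, only the internal names are gone.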
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2f11e16a81a..a0a3fe3c016 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6091,6 +6091,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* fdo#50830: connector indices for VGA and DVI-I are backwards */
+ if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) {
+ if (idx == 0 && *conn == 0x02000300)
+ *conn = 0x02011300;
+ else
+ if (idx == 1 && *conn == 0x04011310)
+ *conn = 0x04000310;
+ else
+ if (idx == 2 && *conn == 0x02011312)
+ *conn = 0x02000312;
+ }
+
return true;
}
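nv_match_device() keys the quirk to one specific board: PCI device ID plus subsystem vendor/device, so only the fdo#50830 card gets its DCB connector entries rewritten. Roughly what the helper checks (defined as a small inline helper in the driver):

static inline bool
nv_match_device(struct drm_device *dev, unsigned device,
		unsigned sub_vendor, unsigned sub_device)
{
	return dev->pdev->device == device &&
	       dev->pdev->subsystem_vendor == sub_vendor &&
	       dev->pdev->subsystem_device == sub_device;
}

Here 0x0421/0x3842/0xc793 is the affected board, and the three conn values swapped above are the DCB entries whose VGA and DVI-I connector indices the vendor BIOS got backwards.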
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 629d8a2df5b..debd90225a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -395,98 +395,3 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
nouveau_channel_put(&chan);
}
}
-
-
-/***********************************
- * ioctls wrapping the functions
- ***********************************/
-
-static int
-nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_channel_alloc *init = data;
- struct nouveau_channel *chan;
- int ret;
-
- if (!dev_priv->eng[NVOBJ_ENGINE_GR])
- return -ENODEV;
-
- if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return -EINVAL;
-
- ret = nouveau_channel_alloc(dev, &chan, file_priv,
- init->fb_ctxdma_handle,
- init->tt_ctxdma_handle);
- if (ret)
- return ret;
- init->channel = chan->id;
-
- if (nouveau_vram_pushbuf == 0) {
- if (chan->dma.ib_max)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
- NOUVEAU_GEM_DOMAIN_GART;
- else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- else
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- } else {
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- }
-
- if (dev_priv->card_type < NV_C0) {
- init->subchan[0].handle = 0x00000000;
- init->subchan[0].grclass = 0x0000;
- init->subchan[1].handle = NvSw;
- init->subchan[1].grclass = NV_SW;
- init->nr_subchan = 2;
- }
-
- /* Named memory object area */
- ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
- &init->notifier_handle);
-
- if (ret == 0)
- atomic_inc(&chan->users); /* userspace reference */
- nouveau_channel_put(&chan);
- return ret;
-}
-
-static int
-nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_channel_free *req = data;
- struct nouveau_channel *chan;
-
- chan = nouveau_channel_get(file_priv, req->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- list_del(&chan->list);
- atomic_dec(&chan->users);
- nouveau_channel_put(&chan);
- return 0;
-}
-
-/***********************************
- * finally, the ioctl table
- ***********************************/
-
-struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
-};
-
-int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index cad254c8e38..9a36f5f39b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -29,6 +29,7 @@
#include "drm.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
+#include "nouveau_abi16.h"
#include "nouveau_hw.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
@@ -384,6 +385,21 @@ nouveau_pci_resume(struct pci_dev *pdev)
return 0;
}
+static struct drm_ioctl_desc nouveau_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
static const struct file_operations nouveau_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -422,7 +438,6 @@ static struct drm_driver driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = nouveau_vblank_enable,
.disable_vblank = nouveau_vblank_disable,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
@@ -463,7 +478,7 @@ static struct pci_driver nouveau_pci_driver = {
static int __init nouveau_init(void)
{
- driver.num_ioctls = nouveau_max_ioctl;
+ driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
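Moving the table next to the drm_driver definition lets num_ioctls be computed locally with ARRAY_SIZE() instead of exporting nouveau_ioctls/nouveau_max_ioctl across files. The flags on each entry gate access: DRM_AUTH requires a client authenticated against the DRM master, DRM_UNLOCKED skips the legacy drm_global_mutex around the handler, and the DRM_MASTER|DRM_ROOT_ONLY combination on SETPARAM restricts it to a privileged master. A minimal sketch of the pattern for any driver (FOO_* names are placeholders):

static struct drm_ioctl_desc foo_ioctls[] = {
	/* ioctl nr, handler, permission flags */
	DRM_IOCTL_DEF_DRV(FOO_GETPARAM, foo_ioctl_getparam,
			  DRM_UNLOCKED | DRM_AUTH),
};

static int __init foo_init(void)
{
	driver.ioctls = foo_ioctls;
	driver.num_ioctls = ARRAY_SIZE(foo_ioctls);	/* no exported count */
	return drm_pci_init(&driver, &foo_pci_driver);
}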
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8613cb23808..4f2cc95ce26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -689,8 +689,6 @@ struct drm_nouveau_private {
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct list_head vbl_waiting;
-
struct {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
@@ -872,10 +870,6 @@ extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
extern int nouveau_unload(struct drm_device *);
-extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
- struct drm_file *);
extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
@@ -914,15 +908,8 @@ extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
				  int count, uint32_t start, uint32_t end,
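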
uint32_t *offset);
-extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
-extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_channel.c */
-extern struct drm_ioctl_desc nouveau_ioctls[];
-extern int nouveau_max_ioctl;
extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
extern int nouveau_channel_alloc(struct drm_device *dev,
struct nouveau_channel **chan,
@@ -938,7 +925,7 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan);
extern int nouveau_channel_idle(struct nouveau_channel *chan);
-/* nouveau_object.c */
+/* nouveau_gpuobj.c */
#define NVOBJ_ENGINE_ADD(d, e, p) do { \
struct drm_nouveau_private *dev_priv = (d)->dev_private; \
dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
@@ -993,10 +980,6 @@ extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
int class, u64 base, u64 size, int target,
int access, u32 type, u32 comp);
-extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_irq.c */
extern int nouveau_irq_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 30f54231694..af7cfb82571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -207,8 +207,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
- dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
+ dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
@@ -342,6 +341,7 @@ retry:
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_ERROR(dev, "multiple instances of buffer %d on "
"validation list\n", b->handle);
+ drm_gem_object_unreference_unlocked(gem);
validate_fini(op, NULL);
return -EINVAL;
}
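The second nouveau_gem.c hunk fixes a GEM reference leak: the lookup that put the buffer on the validation list takes a reference, and the duplicate-entry error path returned without dropping it. Sketch of the corrected shape (the lookup call is assumed from the surrounding function):

gem = drm_gem_object_lookup(dev, file_priv, b->handle);	/* +1 ref */
/* ... */
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
	NV_ERROR(dev, "multiple instances of buffer %d on "
		 "validation list\n", b->handle);
	drm_gem_object_unreference_unlocked(gem);	/* drop the lookup ref */
	validate_fini(op, NULL);
	return -EINVAL;
}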
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
index b190cc01c82..bd79fedb705 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
@@ -758,66 +758,6 @@ nouveau_gpuobj_resume(struct drm_device *dev)
dev_priv->engine.instmem.flush(dev);
}
-int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_grobj_alloc *init = data;
- struct nouveau_channel *chan;
- int ret;
-
- if (init->handle == ~0)
- return -EINVAL;
-
- /* compatibility with userspace that assumes 506e for all chipsets */
- if (init->class == 0x506e) {
- init->class = nouveau_software_class(dev);
- if (init->class == 0x906e)
- return 0;
- } else
- if (init->class == 0x906e) {
- NV_ERROR(dev, "906e not supported yet\n");
- return -EINVAL;
- }
-
- chan = nouveau_channel_get(file_priv, init->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- if (nouveau_ramht_find(chan, init->handle)) {
- ret = -EEXIST;
- goto out;
- }
-
- ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
- if (ret) {
- NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
- ret, init->channel, init->handle);
- }
-
-out:
- nouveau_channel_put(&chan);
- return ret;
-}
-
-int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_channel *chan;
- int ret;
-
- chan = nouveau_channel_get(file_priv, objfree->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- /* Synchronize with the user channel */
- nouveau_channel_idle(chan);
-
- ret = nouveau_ramht_remove(chan, objfree->handle);
- nouveau_channel_put(&chan);
- return ret;
-}
-
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 868c7fd7485..b2c2937531a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -41,12 +41,8 @@
void
nouveau_irq_preinstall(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
-
- INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2ef883c4bbc..69c93b86451 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -161,44 +161,3 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
*b_offset = mem->start;
return 0;
}
-
-int
-nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
-{
- if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
- return -EINVAL;
-
- if (poffset) {
- struct drm_mm_node *mem = nobj->priv;
-
- if (*poffset >= mem->size)
- return false;
-
- *poffset += mem->start;
- }
-
- return 0;
-}
-
-int
-nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_notifierobj_alloc *na = data;
- struct nouveau_channel *chan;
- int ret;
-
- /* completely unnecessary for these chipsets... */
- if (unlikely(dev_priv->card_type >= NV_C0))
- return -EINVAL;
-
- chan = nouveau_channel_get(file_priv, na->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
- &na->offset);
- nouveau_channel_put(&chan);
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
index e60bc6ce900..709e5ac680e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -4,13 +4,15 @@
struct nouveau_software_priv {
struct nouveau_exec_engine base;
struct list_head vblank;
+ spinlock_t peephole_lock;
};
struct nouveau_software_chan {
struct list_head flip;
struct {
struct list_head list;
- struct nouveau_bo *bo;
+ u32 channel;
+ u32 ctxdma;
u32 offset;
u32 value;
u32 head;
@@ -18,32 +20,17 @@ struct nouveau_software_chan {
};
static inline void
-nouveau_software_vblank(struct drm_device *dev, int crtc)
-{
- struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
- struct nouveau_software_chan *pch, *tmp;
-
- list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
- if (pch->vblank.head != crtc)
- continue;
-
- nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
- pch->vblank.value);
- list_del(&pch->vblank.list);
- drm_vblank_put(dev, crtc);
- }
-}
-
-static inline void
nouveau_software_context_new(struct nouveau_software_chan *pch)
{
INIT_LIST_HEAD(&pch->flip);
+ INIT_LIST_HEAD(&pch->vblank.list);
}
static inline void
nouveau_software_create(struct nouveau_software_priv *psw)
{
INIT_LIST_HEAD(&psw->vblank);
+ spin_lock_init(&psw->peephole_lock);
}
static inline u16
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 19706f0532e..1cdfd6e757c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -1234,80 +1234,6 @@ int nouveau_unload(struct drm_device *dev)
return 0;
}
-int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_getparam *getparam = data;
-
- switch (getparam->param) {
- case NOUVEAU_GETPARAM_CHIPSET_ID:
- getparam->value = dev_priv->chipset;
- break;
- case NOUVEAU_GETPARAM_PCI_VENDOR:
- getparam->value = dev->pci_vendor;
- break;
- case NOUVEAU_GETPARAM_PCI_DEVICE:
- getparam->value = dev->pci_device;
- break;
- case NOUVEAU_GETPARAM_BUS_TYPE:
- if (drm_pci_device_is_agp(dev))
- getparam->value = NV_AGP;
- else if (pci_is_pcie(dev->pdev))
- getparam->value = NV_PCIE;
- else
- getparam->value = NV_PCI;
- break;
- case NOUVEAU_GETPARAM_FB_SIZE:
- getparam->value = dev_priv->fb_available_size;
- break;
- case NOUVEAU_GETPARAM_AGP_SIZE:
- getparam->value = dev_priv->gart_info.aper_size;
- break;
- case NOUVEAU_GETPARAM_VM_VRAM_BASE:
- getparam->value = 0; /* deprecated */
- break;
- case NOUVEAU_GETPARAM_PTIMER_TIME:
- getparam->value = dev_priv->engine.timer.read(dev);
- break;
- case NOUVEAU_GETPARAM_HAS_BO_USAGE:
- getparam->value = 1;
- break;
- case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = 1;
- break;
- case NOUVEAU_GETPARAM_GRAPH_UNITS:
- /* NV40 and NV50 versions are quite different, but register
- * address is the same. User is supposed to know the card
- * family anyway... */
- if (dev_priv->chipset >= 0x40) {
- getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
- break;
- }
- /* FALLTHRU */
- default:
- NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int
-nouveau_ioctl_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_setparam *setparam = data;
-
- switch (setparam->param) {
- default:
- NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param);
- return -EINVAL;
- }
-
- return 0;
-}
-
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
bool
nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 4c31c63e552..43accc11102 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -215,7 +215,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool
-nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 8300266ffae..38f19479417 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -332,7 +332,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (nv04_dac_in_use(encoder))
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 2258746016f..c2675623b7c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -179,7 +179,7 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
}
static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 696d7e7dc2a..67be5db021f 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -338,7 +338,7 @@ static int nv17_tv_mode_valid(struct drm_encoder *encoder,
}
static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 97a477b3d52..22cebd5dd69 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -527,7 +527,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
}
static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index eb216a446b8..2c36a6b92c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -175,7 +175,8 @@ nv50_dac_restore(struct drm_encoder *encoder)
}
static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 5c41612723b..b244d9968c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -646,7 +646,30 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
- nouveau_software_vblank(dev, crtc);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nouveau_software_chan *pch, *tmp;
+
+ list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
+ if (pch->vblank.head != crtc)
+ continue;
+
+ spin_lock(&psw->peephole_lock);
+ nv_wr32(dev, 0x001704, pch->vblank.channel);
+ nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
+ if (dev_priv->chipset == 0x50) {
+ nv_wr32(dev, 0x001570, pch->vblank.offset);
+ nv_wr32(dev, 0x001574, pch->vblank.value);
+ } else {
+ nv_wr32(dev, 0x060010, pch->vblank.offset);
+ nv_wr32(dev, 0x060014, pch->vblank.value);
+ }
+ spin_unlock(&psw->peephole_lock);
+
+ list_del(&pch->vblank.list);
+ drm_vblank_put(dev, crtc);
+ }
+
drm_handle_vblank(dev, crtc);
}
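The rewritten handler no longer CPU-writes the semaphore through a pinned notifier BO; it goes through the PFIFO peephole, a single device-global register window that lets the host poke a value into a DMA object belonging to any channel. That is why nouveau_software_chan now records a channel/ctxdma pair instead of a bo pointer, and why a spinlock had to appear: the multi-register sequence must not interleave between CRTCs. Annotated shape of the sequence, register meanings as used above:

spin_lock(&psw->peephole_lock);			/* window is device-global */
nv_wr32(dev, 0x001704, vblank->channel);	/* select channel instance */
nv_wr32(dev, 0x001710, 0x80000000 | vblank->ctxdma);	/* bind dma object */
nv_wr32(dev, 0x001570, vblank->offset);		/* nv50: target offset... */
nv_wr32(dev, 0x001574, vblank->value);		/* ...and value to write */
spin_unlock(&psw->peephole_lock);

On chipsets newer than the original nv50, the offset/value pair moves to 0x060010/0x060014, as the chipset check above shows.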
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index d9cc2f2638d..437608d1dfe 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -299,7 +299,7 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
- { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
+ { 0x00000003, "INVALID_OPERATION", NULL },
{ 0x00000004, "INVALID_VALUE", NULL },
{ 0x00000005, "INVALID_ENUM", NULL },
{ 0x00000008, "INVALID_OBJECT", NULL },
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
index 114d2517d4a..df554d9dacb 100644
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -36,9 +36,6 @@ struct nv50_software_priv {
struct nv50_software_chan {
struct nouveau_software_chan base;
- struct {
- struct nouveau_gpuobj *object;
- } vblank;
};
static int
@@ -51,11 +48,7 @@ mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
if (!gpuobj)
return -ENOENT;
- if (nouveau_notifier_offset(gpuobj, NULL))
- return -EINVAL;
-
- pch->vblank.object = gpuobj;
- pch->base.vblank.offset = ~0;
+ pch->base.vblank.ctxdma = gpuobj->cinst >> 4;
return 0;
}
@@ -63,11 +56,7 @@ static int
mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-
- if (nouveau_notifier_offset(pch->vblank.object, &data))
- return -ERANGE;
-
- pch->base.vblank.offset = data >> 2;
+ pch->base.vblank.offset = data;
return 0;
}
@@ -86,7 +75,7 @@ mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_device *dev = chan->dev;
- if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
+ if (data > 1)
return -EINVAL;
drm_vblank_get(dev, data);
@@ -116,7 +105,7 @@ nv50_software_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM;
nouveau_software_context_new(&pch->base);
- pch->base.vblank.bo = chan->notifier_bo;
+ pch->base.vblank.channel = chan->ramin->vinst >> 12;
chan->engctx[engine] = pch;
/* dma objects for display sync channel semaphore blocks */
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index a9514eaa74c..93240bde891 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -327,7 +327,8 @@ nv50_sor_restore(struct drm_encoder *encoder)
}
static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index edece9c616e..bbfcc73b670 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -117,18 +117,30 @@ nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
nv50_vm_flush_engine(dev, 0x0a);
}
+static struct nouveau_bitfield nv84_crypt_intr[] = {
+ { 0x00000001, "INVALID_STATE" },
+ { 0x00000002, "ILLEGAL_MTHD" },
+ { 0x00000004, "ILLEGAL_CLASS" },
+ { 0x00000080, "QUERY" },
+ { 0x00000100, "FAULT" },
+ {}
+};
+
static void
nv84_crypt_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x102130);
u32 mthd = nv_rd32(dev, 0x102190);
u32 data = nv_rd32(dev, 0x102194);
- u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+ u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
int show = nouveau_ratelimit();
+ int chid = nv50_graph_isr_chid(dev, inst);
if (show) {
- NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- stat, mthd, data, inst);
+ NV_INFO(dev, "PCRYPT:");
+ nouveau_bitfield_print(nv84_crypt_intr, stat);
+ printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
+ chid, inst, mthd, data);
}
nv_wr32(dev, 0x102130, stat);
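The PCRYPT ISR now decodes the status word through a nouveau_bitfield table and resolves the faulting instance to a channel id, instead of dumping four raw hex words. nouveau_bitfield_print() simply walks the zero-terminated table and names every set bit; a minimal sketch, assuming the two-field table layout used above:

struct nouveau_bitfield {
	u32 mask;
	const char *name;
};

static void
nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
{
	for (; bf->name; bf++)			/* table ends with {} */
		if (value & bf->mask)
			printk(" %s", bf->name);
}

The shift of the saved instance pointer (<< 12) turns it back into the 64-bit VRAM address that nv50_graph_isr_chid() expects when matching it against per-channel ramin.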
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index abc36626fef..219850d5328 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -119,9 +119,9 @@ dispatch_dma:
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
.b32 #ctx_src_address_high ~0x000000ff
-.b32 #ctx_src_address_low ~0xfffffff0
+.b32 #ctx_src_address_low ~0xffffffff
.b32 #ctx_dst_address_high ~0x000000ff
-.b32 #ctx_dst_address_low ~0xfffffff0
+.b32 #ctx_dst_address_low ~0xffffffff
.b32 #ctx_src_pitch ~0x0007ffff
.b32 #ctx_dst_pitch ~0x0007ffff
.b32 #ctx_xcnt ~0x0000ffff
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 1f33fbdc00b..37d6de3c9d6 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -1,37 +1,72 @@
-uint32_t nva3_pcopy_data[] = {
+u32 nva3_pcopy_data[] = {
+/* 0x0000: ctx_object */
0x00000000,
+/* 0x0004: ctx_dma */
+/* 0x0004: ctx_dma_query */
0x00000000,
+/* 0x0008: ctx_dma_src */
0x00000000,
+/* 0x000c: ctx_dma_dst */
0x00000000,
+/* 0x0010: ctx_query_address_high */
0x00000000,
+/* 0x0014: ctx_query_address_low */
0x00000000,
+/* 0x0018: ctx_query_counter */
0x00000000,
+/* 0x001c: ctx_src_address_high */
0x00000000,
+/* 0x0020: ctx_src_address_low */
0x00000000,
+/* 0x0024: ctx_src_pitch */
0x00000000,
+/* 0x0028: ctx_src_tile_mode */
0x00000000,
+/* 0x002c: ctx_src_xsize */
0x00000000,
+/* 0x0030: ctx_src_ysize */
0x00000000,
+/* 0x0034: ctx_src_zsize */
0x00000000,
+/* 0x0038: ctx_src_zoff */
0x00000000,
+/* 0x003c: ctx_src_xoff */
0x00000000,
+/* 0x0040: ctx_src_yoff */
0x00000000,
+/* 0x0044: ctx_src_cpp */
0x00000000,
+/* 0x0048: ctx_dst_address_high */
0x00000000,
+/* 0x004c: ctx_dst_address_low */
0x00000000,
+/* 0x0050: ctx_dst_pitch */
0x00000000,
+/* 0x0054: ctx_dst_tile_mode */
0x00000000,
+/* 0x0058: ctx_dst_xsize */
0x00000000,
+/* 0x005c: ctx_dst_ysize */
0x00000000,
+/* 0x0060: ctx_dst_zsize */
0x00000000,
+/* 0x0064: ctx_dst_zoff */
0x00000000,
+/* 0x0068: ctx_dst_xoff */
0x00000000,
+/* 0x006c: ctx_dst_yoff */
0x00000000,
+/* 0x0070: ctx_dst_cpp */
0x00000000,
+/* 0x0074: ctx_format */
0x00000000,
+/* 0x0078: ctx_swz_const0 */
0x00000000,
+/* 0x007c: ctx_swz_const1 */
0x00000000,
+/* 0x0080: ctx_xcnt */
0x00000000,
+/* 0x0084: ctx_ycnt */
0x00000000,
0x00000000,
0x00000000,
@@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0100: dispatch_table */
0x00010000,
0x00000000,
0x00000000,
@@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
0x00010162,
0x00000000,
0x00030060,
+/* 0x0128: dispatch_dma */
0x00010170,
0x00000000,
0x00010170,
@@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
0x0000001c,
0xffffff00,
0x00000020,
- 0x0000000f,
+ 0x00000000,
0x00000048,
0xffffff00,
0x0000004c,
- 0x0000000f,
+ 0x00000000,
0x00000024,
0xfff80000,
0x00000050,
@@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
0x00000800,
};
-uint32_t nva3_pcopy_code[] = {
+u32 nva3_pcopy_code[] = {
+/* 0x0000: main */
0x04fe04bd,
0x3517f000,
0xf10010fe,
@@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
0x17f11031,
0x27f01200,
0x0012d003,
+/* 0x002f: spin */
0xf40031f4,
0x0ef40028,
+/* 0x0035: ih */
0x8001cffd,
0xf40812c4,
0x21f4060b,
+/* 0x0041: ih_no_chsw */
0x0412c472,
0xf4060bf4,
+/* 0x004a: ih_no_cmd */
0x11c4c321,
0x4001d00c,
+/* 0x0052: swctx */
0x47f101f8,
0x4bfe7700,
0x0007fe00,
0xf00204b9,
0x01f40643,
0x0604fa09,
+/* 0x006b: swctx_load */
0xfa060ef4,
+/* 0x006e: swctx_done */
0x03f80504,
+/* 0x0072: chsw */
0x27f100f8,
0x23cf1400,
0x1e3fc800,
@@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
0x1e3af052,
0xf00023d0,
0x24d00147,
+/* 0x0093: chsw_no_unload */
0xcf00f880,
0x3dc84023,
0x220bf41e,
0xf40131f4,
0x57f05221,
0x0367f004,
+/* 0x00a8: chsw_load_ctx_dma */
0xa07856bc,
0xb6018068,
0x87d00884,
0x0162b600,
+/* 0x00bb: chsw_finish_load */
0xf0f018f4,
0x23d00237,
+/* 0x00c3: dispatch */
0xf100f880,
0xcf190037,
0x33cf4032,
@@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
0x1024b607,
0x010057f1,
0x74bd64bd,
+/* 0x00dc: dispatch_loop */
0x58005658,
0x50b60157,
0x0446b804,
@@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
0xb60276bb,
0x57bb0374,
0xdf0ef400,
+/* 0x0100: dispatch_valid_mthd */
0xb60246bb,
0x45bb0344,
0x01459800,
@@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
0xb0014658,
0x1bf40064,
0x00538009,
+/* 0x0127: dispatch_cmd */
0xf4300ef4,
0x55f90132,
0xf40c01f4,
+/* 0x0132: dispatch_invalid_bitfield */
0x25f0250e,
+/* 0x0135: dispatch_illegal_mthd */
0x0125f002,
+/* 0x0138: dispatch_error */
0x100047f1,
0xd00042d0,
0x27f04043,
0x0002d040,
+/* 0x0148: hostirq_wait */
0xf08002cf,
0x24b04024,
0xf71bf400,
+/* 0x0154: dispatch_done */
0x1d0027f1,
0xd00137f0,
0x00f80023,
+/* 0x0160: cmd_nop */
+/* 0x0162: cmd_pm_trigger */
0x27f100f8,
0x34bd2200,
0xd00233f0,
0x00f80023,
+/* 0x0170: cmd_dma */
0x012842b7,
0xf00145b6,
0x43801e39,
0x0040b701,
0x0644b606,
0xf80043d0,
+/* 0x0189: cmd_exec_set_format */
0xf030f400,
0xb00001b0,
0x01b00101,
@@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
0x70b63847,
0x0232f401,
0x94bd84bd,
+/* 0x01b4: ncomp_loop */
0xb60f4ac4,
0xb4bd0445,
+/* 0x01bc: bpc_loop */
0xf404a430,
0xa5ff0f18,
0x00cbbbc0,
0xf40231f4,
+/* 0x01ce: cmp_c0 */
0x1bf4220e,
0x10c7f00c,
0xf400cbbb,
+/* 0x01da: cmp_c1 */
0xa430160e,
0x0c18f406,
0xbb14c7f0,
0x0ef400cb,
+/* 0x01e9: cmp_zero */
0x80c7f107,
+/* 0x01ed: bpc_next */
0x01c83800,
0xb60180b6,
0xb5b801b0,
@@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
0x98110680,
0x68fd2008,
0x0502f400,
+/* 0x0216: dst_xcnt */
0x75fd64bd,
0x1c078000,
0xf10078fd,
@@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
0x980056d0,
0x56d01f06,
0x1030f440,
+/* 0x0276: cmd_exec_set_surface_tiled */
0x579800f8,
0x6879c70a,
0xb66478c7,
@@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
0x0e76b060,
0xf0091bf4,
0x0ef40477,
+/* 0x0291: xtile64 */
0x027cf00f,
0xfd1170b6,
0x77f00947,
+/* 0x029d: xtileok */
0x0f5a9806,
0xfd115b98,
0xb7f000ab,
@@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
0x67d00600,
0x0060b700,
0x0068d004,
+/* 0x0382: cmd_exec_set_surface_linear */
0x6cf000f8,
0x0260b702,
0x0864b602,
@@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
0xb70067d0,
0x98040060,
0x67d00957,
+/* 0x03ab: cmd_exec_wait */
0xf900f800,
0xf110f900,
0xb6080007,
+/* 0x03b6: loop */
0x01cf0604,
0x0114f000,
0xfcfa1bf4,
0xf800fc10,
+/* 0x03c5: cmd_exec_query */
0x0d34c800,
0xf5701bf4,
0xf103ab21,
@@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
0x47f10153,
0x44b60800,
0x0045d006,
+/* 0x0438: query_counter */
0x03ab21f5,
0x080c47f1,
0x980644b6,
@@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
0x47f10153,
0x44b60800,
0x0045d006,
+/* 0x0492: cmd_exec */
0x21f500f8,
0x3fc803ab,
0x0e0bf400,
0x018921f5,
0x020047f1,
+/* 0x04a7: cmd_exec_no_format */
0xf11e0ef4,
0xb6081067,
0x77f00664,
@@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
0x981c0780,
0x67d02007,
0x4067d000,
+/* 0x04c2: cmd_exec_init_src_surface */
0x32f444bd,
0xc854bd02,
0x0bf4043f,
0x8221f50a,
0x0a0ef403,
+/* 0x04d4: src_tiled */
0x027621f5,
+/* 0x04db: cmd_exec_init_dst_surface */
0xf40749f0,
0x57f00231,
0x083fc82c,
0xf50a0bf4,
0xf4038221,
+/* 0x04ee: dst_tiled */
0x21f50a0e,
0x49f00276,
+/* 0x04f5: cmd_exec_kick */
0x0057f108,
0x0654b608,
0xd0210698,
@@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
0xc80054d0,
0x0bf40c3f,
0xc521f507,
+/* 0x0519: cmd_exec_done */
+/* 0x051b: cmd_wrcache_flush */
0xf100f803,
0xbd220027,
0x0133f034,
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index a8d17458ced..cd879f31bb3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -1,34 +1,65 @@
-uint32_t nvc0_pcopy_data[] = {
+u32 nvc0_pcopy_data[] = {
+/* 0x0000: ctx_object */
0x00000000,
+/* 0x0004: ctx_query_address_high */
0x00000000,
+/* 0x0008: ctx_query_address_low */
0x00000000,
+/* 0x000c: ctx_query_counter */
0x00000000,
+/* 0x0010: ctx_src_address_high */
0x00000000,
+/* 0x0014: ctx_src_address_low */
0x00000000,
+/* 0x0018: ctx_src_pitch */
0x00000000,
+/* 0x001c: ctx_src_tile_mode */
0x00000000,
+/* 0x0020: ctx_src_xsize */
0x00000000,
+/* 0x0024: ctx_src_ysize */
0x00000000,
+/* 0x0028: ctx_src_zsize */
0x00000000,
+/* 0x002c: ctx_src_zoff */
0x00000000,
+/* 0x0030: ctx_src_xoff */
0x00000000,
+/* 0x0034: ctx_src_yoff */
0x00000000,
+/* 0x0038: ctx_src_cpp */
0x00000000,
+/* 0x003c: ctx_dst_address_high */
0x00000000,
+/* 0x0040: ctx_dst_address_low */
0x00000000,
+/* 0x0044: ctx_dst_pitch */
0x00000000,
+/* 0x0048: ctx_dst_tile_mode */
0x00000000,
+/* 0x004c: ctx_dst_xsize */
0x00000000,
+/* 0x0050: ctx_dst_ysize */
0x00000000,
+/* 0x0054: ctx_dst_zsize */
0x00000000,
+/* 0x0058: ctx_dst_zoff */
0x00000000,
+/* 0x005c: ctx_dst_xoff */
0x00000000,
+/* 0x0060: ctx_dst_yoff */
0x00000000,
+/* 0x0064: ctx_dst_cpp */
0x00000000,
+/* 0x0068: ctx_format */
0x00000000,
+/* 0x006c: ctx_swz_const0 */
0x00000000,
+/* 0x0070: ctx_swz_const1 */
0x00000000,
+/* 0x0074: ctx_xcnt */
0x00000000,
+/* 0x0078: ctx_ycnt */
0x00000000,
0x00000000,
0x00000000,
@@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0100: dispatch_table */
0x00010000,
0x00000000,
0x00000000,
@@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
0x00000010,
0xffffff00,
0x00000014,
- 0x0000000f,
+ 0x00000000,
0x0000003c,
0xffffff00,
0x00000040,
- 0x0000000f,
+ 0x00000000,
0x00000018,
0xfff80000,
0x00000044,
@@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
0x00000800,
};
-uint32_t nvc0_pcopy_code[] = {
+u32 nvc0_pcopy_code[] = {
+/* 0x0000: main */
0x04fe04bd,
0x3517f000,
0xf10010fe,
@@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
0x17f11031,
0x27f01200,
0x0012d003,
+/* 0x002f: spin */
0xf40031f4,
0x0ef40028,
+/* 0x0035: ih */
0x8001cffd,
0xf40812c4,
0x21f4060b,
+/* 0x0041: ih_no_chsw */
0x0412c4ca,
0xf5070bf4,
+/* 0x004b: ih_no_cmd */
0xc4010221,
0x01d00c11,
+/* 0x0053: swctx */
0xf101f840,
0xfe770047,
0x47f1004b,
@@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
0xf00204b9,
0x01f40643,
0x0604fa09,
+/* 0x00c3: swctx_load */
0xfa060ef4,
+/* 0x00c6: swctx_done */
0x03f80504,
+/* 0x00ca: chsw */
0x27f100f8,
0x23cf1400,
0x1e3fc800,
@@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
0x1e3af053,
0xf00023d0,
0x24d00147,
+/* 0x00eb: chsw_no_unload */
0xcf00f880,
0x3dc84023,
0x090bf41e,
0xf40131f4,
+/* 0x00fa: chsw_finish_load */
0x37f05321,
0x8023d002,
+/* 0x0102: dispatch */
0x37f100f8,
0x32cf1900,
0x0033cf40,
0x07ff24e4,
0xf11024b6,
0xbd010057,
+/* 0x011b: dispatch_loop */
0x5874bd64,
0x57580056,
0x0450b601,
@@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
0xbb0f08f4,
0x74b60276,
0x0057bb03,
+/* 0x013f: dispatch_valid_mthd */
0xbbdf0ef4,
0x44b60246,
0x0045bb03,
@@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
0x64b00146,
0x091bf400,
0xf4005380,
+/* 0x0166: dispatch_cmd */
0x32f4300e,
0xf455f901,
0x0ef40c01,
+/* 0x0171: dispatch_invalid_bitfield */
0x0225f025,
+/* 0x0174: dispatch_illegal_mthd */
+/* 0x0177: dispatch_error */
0xf10125f0,
0xd0100047,
0x43d00042,
0x4027f040,
+/* 0x0187: hostirq_wait */
0xcf0002d0,
0x24f08002,
0x0024b040,
+/* 0x0193: dispatch_done */
0xf1f71bf4,
0xf01d0027,
0x23d00137,
+/* 0x019f: cmd_nop */
0xf800f800,
+/* 0x01a1: cmd_pm_trigger */
0x0027f100,
0xf034bd22,
0x23d00233,
+/* 0x01af: cmd_exec_set_format */
0xf400f800,
0x01b0f030,
0x0101b000,
@@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
0x3847c701,
0xf40170b6,
0x84bd0232,
+/* 0x01da: ncomp_loop */
0x4ac494bd,
0x0445b60f,
+/* 0x01e2: bpc_loop */
0xa430b4bd,
0x0f18f404,
0xbbc0a5ff,
0x31f400cb,
0x220ef402,
+/* 0x01f4: cmp_c0 */
0xf00c1bf4,
0xcbbb10c7,
0x160ef400,
+/* 0x0200: cmp_c1 */
0xf406a430,
0xc7f00c18,
0x00cbbb14,
+/* 0x020f: cmp_zero */
0xf1070ef4,
+/* 0x0213: bpc_next */
0x380080c7,
0x80b601c8,
0x01b0b601,
@@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
0x1d08980e,
0xf40068fd,
0x64bd0502,
+/* 0x023c: dst_xcnt */
0x800075fd,
0x78fd1907,
0x1057f100,
@@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
0x1c069800,
0xf44056d0,
0x00f81030,
+/* 0x029c: cmd_exec_set_surface_tiled */
0xc7075798,
0x78c76879,
0x0380b664,
0xb06077c7,
0x1bf40e76,
0x0477f009,
+/* 0x02b7: xtile64 */
0xf00f0ef4,
0x70b6027c,
0x0947fd11,
+/* 0x02c3: xtileok */
0x980677f0,
0x5b980c5a,
0x00abfd0e,
@@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
0xb70067d0,
0xd0040060,
0x00f80068,
+/* 0x03a8: cmd_exec_set_surface_linear */
0xb7026cf0,
0xb6020260,
0x57980864,
@@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
0x0060b700,
0x06579804,
0xf80067d0,
+/* 0x03d1: cmd_exec_wait */
0xf900f900,
0x0007f110,
0x0604b608,
+/* 0x03dc: loop */
0xf00001cf,
0x1bf40114,
0xfc10fcfa,
+/* 0x03eb: cmd_exec_query */
0xc800f800,
0x1bf40d34,
0xd121f570,
@@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
0x0153f026,
0x080047f1,
0xd00644b6,
+/* 0x045e: query_counter */
0x21f50045,
0x47f103d1,
0x44b6080c,
@@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
0x080047f1,
0xd00644b6,
0x00f80045,
+/* 0x04b8: cmd_exec */
0x03d121f5,
0xf4003fc8,
0x21f50e0b,
0x47f101af,
0x0ef40200,
+/* 0x04cd: cmd_exec_no_format */
0x1067f11e,
0x0664b608,
0x800177f0,
@@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
0x1d079819,
0xd00067d0,
0x44bd4067,
+/* 0x04e8: cmd_exec_init_src_surface */
0xbd0232f4,
0x043fc854,
0xf50a0bf4,
0xf403a821,
+/* 0x04fa: src_tiled */
0x21f50a0e,
0x49f0029c,
+/* 0x0501: cmd_exec_init_dst_surface */
0x0231f407,
0xc82c57f0,
0x0bf4083f,
0xa821f50a,
0x0a0ef403,
+/* 0x0514: dst_tiled */
0x029c21f5,
+/* 0x051b: cmd_exec_kick */
0xf10849f0,
0xb6080057,
0x06980654,
@@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
0x54d00546,
0x0c3fc800,
0xf5070bf4,
+/* 0x053f: cmd_exec_done */
0xf803eb21,
+/* 0x0541: cmd_wrcache_flush */
0x0027f100,
0xf034bd22,
0x23d00133,
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index c486d3ce3c2..d0d60e1e7f9 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -607,7 +607,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
}
static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -938,7 +938,8 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
}
static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nvd0_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -1377,7 +1378,8 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
}
static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nvd0_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 88718fad5d6..2666a5308ab 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -71,7 +71,6 @@ static struct drm_driver driver = {
.irq_postinstall = r128_driver_irq_postinstall,
.irq_uninstall = r128_driver_irq_uninstall,
.irq_handler = r128_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = &r128_driver_fops,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 3904d7964a4..9e6f76fec52 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -457,22 +457,18 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
switch (pll_id) {
case ATOM_PPLL1:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
+ args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
args.v3.ucEnable = ATOM_DISABLE;
@@ -482,22 +478,18 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
+ args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
args.v2.ucEnable = ATOM_DISABLE;
@@ -1539,7 +1531,11 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
* crtc virtual pixel clock.
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
- if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
+ if (ASIC_IS_DCE5(rdev))
+ return ATOM_DCPLL;
+ else if (ASIC_IS_DCE6(rdev))
+ return ATOM_PPLL0;
+ else if (rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
}
}
@@ -1628,7 +1624,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
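Note on the two spread-spectrum hunks above: every non-returning switch arm wrote usSpreadSpectrumAmount/usSpreadSpectrumStep, so the stores move below the switch; as a side effect the DCPLL arm now programs ss->amount/ss->step instead of zero, while the early return on ATOM_PPLL_INVALID still skips the stores. The hoist in miniature, as a standalone sketch with hypothetical names (not the driver structs):

#include <stdio.h>

enum pll { PLL_A, PLL_B, PLL_INVALID };
struct ss_args { unsigned type, amount, step; };

static void program_ss(struct ss_args *args, enum pll id,
                       unsigned amount, unsigned step)
{
	switch (id) {
	case PLL_A: args->type |= 0x1; break;
	case PLL_B: args->type |= 0x2; break;
	case PLL_INVALID: return;	/* early out: stores below are skipped */
	}
	args->amount = amount;		/* hoisted: identical in every arm */
	args->step = step;
}

int main(void)
{
	struct ss_args a = { 0, 0, 0 };
	program_ss(&a, PLL_A, 36, 5);
	printf("type=%u amount=%u step=%u\n", a.type, a.amount, a.step);
	return 0;
}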
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5131b3b0f7d..7712cf5ab33 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -22,6 +22,7 @@
*
* Authors: Dave Airlie
* Alex Deucher
+ * Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
@@ -608,7 +609,7 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
}
void radeon_dp_set_link_config(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
@@ -654,7 +655,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
- DRM_ERROR("displayport link status failed\n");
return false;
}
@@ -833,8 +833,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
else
mdelay(dp_info->rd_interval * 4);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
break;
+ }
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
@@ -896,8 +898,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
else
mdelay(dp_info->rd_interval * 4);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
break;
+ }
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
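The atombios_dp.c hunks move the "displayport link status failed" message out of radeon_dp_get_link_status() and into the two training loops, so a failed AUX read is reported where it actually aborts training rather than on every probe. The shape of that pattern as a standalone sketch (hypothetical helpers, not the driver API):

#include <stdbool.h>
#include <stdio.h>

static bool get_link_status(int aux_ret)
{
	return aux_ret > 0;	/* helper stays silent; the caller decides to log */
}

static void train_clock_recovery(void)
{
	for (int tries = 0; tries < 5; tries++) {
		if (!get_link_status(tries < 3 ? 1 : -1)) {
			fprintf(stderr, "displayport link status failed\n");
			break;
		}
		/* ... evaluate clock-recovery bits, adjust drive levels ... */
	}
}

int main(void)
{
	train_clock_recovery();
	return 0;
}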
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 486ccdf4aac..f9bc27fe269 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -58,7 +58,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
}
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -1392,10 +1392,18 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_ON:
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+ ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ if (ASIC_IS_DCE6(rdev)) {
+ /* It seems we need to call ATOM_ENCODER_CMD_SETUP again
+ * before re-enabling the encoder on DPMS ON; otherwise we
+ * never get a picture
+ */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ }
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- else
+ } else {
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
@@ -2234,7 +2242,7 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
}
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fb3d2e0434..e585a3b947e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -99,6 +99,14 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
@@ -118,18 +126,49 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
+/**
+ * evergreen_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
+ */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; once it does, we release the lock and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -214,6 +253,15 @@ int sumo_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
int idx;
@@ -265,6 +313,14 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
rdev->pm.power_state[idx].num_clock_modes - 1;
}
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -292,6 +348,13 @@ void evergreen_pm_misc(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -310,6 +373,13 @@ void evergreen_pm_prepare(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
void evergreen_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -328,6 +398,15 @@ void evergreen_pm_finish(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
@@ -364,6 +443,14 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
return connected;
}
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
@@ -424,10 +511,19 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
}
}
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
void evergreen_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enabled = 0;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
@@ -436,73 +532,72 @@ void evergreen_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
- rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
- rdev->irq.hpd[3] = true;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
- rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
- rdev->irq.hpd[5] = true;
break;
default:
break;
}
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ enabled |= 1 << radeon_connector->hpd.hpd;
}
- if (rdev->irq.installed)
- evergreen_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enabled);
}
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disabled = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
- rdev->irq.hpd[3] = false;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
- rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
- rdev->irq.hpd[5] = false;
break;
default:
break;
}
+ disabled |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disabled);
}
/* watermark setup */
@@ -933,6 +1028,14 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
}
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
@@ -956,6 +1059,15 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle.
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
@@ -1371,12 +1483,28 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
- /* FIXME: implement */
+
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
+ }
+
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
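On the next_rptr arithmetic above (editor's reading of the dword counts; the diff itself does not spell this out): a PACKET3 write of one config register is 3 dwords and the trailing INDIRECT_BUFFER packet is 4, hence wptr + 3 + 4; the MEM_WRITE packet is 5 dwords, hence wptr + 5 + 4. Either way the ring records where rptr will stand once this IB has been consumed:

#include <stdio.h>

int main(void)
{
	unsigned wptr = 100;			/* hypothetical write pointer */
	unsigned via_reg = wptr + 3 + 4;	/* SET_CONFIG_REG + INDIRECT_BUFFER */
	unsigned via_mem = wptr + 5 + 4;	/* MEM_WRITE + INDIRECT_BUFFER */

	printf("next rptr: %u (reg path) / %u (mem path)\n", via_reg, via_mem);
	return 0;
}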
@@ -2188,6 +2316,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -2225,6 +2361,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
@@ -2348,20 +2492,20 @@ int evergreen_irq_set(struct radeon_device *rdev)
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
} else {
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -2369,32 +2513,32 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
- rdev->irq.pflip[2]) {
+ atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
- rdev->irq.pflip[3]) {
+ atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
- rdev->irq.pflip[4]) {
+ atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
- rdev->irq.pflip[5]) {
+ atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2676,7 +2820,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
@@ -2684,22 +2827,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
-restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
evergreen_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -2716,7 +2858,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -2742,7 +2884,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -2768,7 +2910,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[2])
+ if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -2794,7 +2936,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[3])
+ if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -2820,7 +2962,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[4])
+ if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -2846,7 +2988,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[5])
+ if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
@@ -2986,7 +3128,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -2998,17 +3139,19 @@ restart_ih:
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = evergreen_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
return IRQ_HANDLED;
}
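The evergreen_irq_process() rework replaces the ih.lock spinlock with an atomic flag: atomic_xchg() claims the ring, the wptr re-check moves to after the flag is dropped, and a late arrival loops back to restart_ih. A minimal userspace model of that control flow (not the driver code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ih_lock;
static unsigned rptr, wptr = 4;

static int process(void)
{
restart:
	if (atomic_exchange(&ih_lock, 1))
		return 0;		/* someone else is draining: IRQ_NONE */

	while (rptr != wptr)
		rptr++;			/* ... decode one IH ring entry ... */

	atomic_store(&ih_lock, 0);
	if (wptr != rptr)		/* the driver re-reads the hw wptr here */
		goto restart;		/* entries landed in the window: go again */
	return 1;			/* IRQ_HANDLED */
}

int main(void)
{
	printf("%d\n", process());
	return 0;
}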
@@ -3096,13 +3239,11 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -3146,9 +3287,6 @@ int evergreen_suspend(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_audio_fini(rdev);
- /* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
r700_cp_stop(rdev);
ring->ready = false;
evergreen_irq_suspend(rdev);
@@ -3234,20 +3372,14 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -3274,7 +3406,7 @@ void evergreen_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -3289,7 +3421,8 @@ void evergreen_fini(struct radeon_device *rdev)
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
- u32 link_width_cntl, speed_cntl;
+ u32 link_width_cntl, speed_cntl, mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -3304,6 +3437,15 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & DRM_PCIE_SPEED_50))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 1e96bd458cf..89cb9feb565 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -622,7 +622,8 @@ int evergreen_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
- rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -633,10 +634,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
rdev->r600_blit.max_dim = 16384;
- /* pin copy shader into vram if already initialized */
- if (rdev->r600_blit.shader_obj)
- goto done;
-
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
@@ -667,11 +664,26 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += cayman_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("evergreen failed to allocate shader\n");
- return r;
+ /* pin copy shader into vram if not already initialized */
+ if (!rdev->r600_blit.shader_obj) {
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("evergreen failed to allocate shader\n");
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
}
DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
@@ -713,17 +725,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-done:
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
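evergreen_blit_init() folds the old "goto done" pin path into the allocation branch: create and pin happen only when shader_obj does not exist yet, and a resume with a live object goes straight to re-uploading the shader bytes. The control flow in miniature (hypothetical stand-ins, not the bo API):

#include <stdio.h>
#include <stdlib.h>

static void *shader_obj;			/* persists across "resume" */

static int blit_init(void)
{
	if (!shader_obj) {			/* first call only: create + pin */
		shader_obj = malloc(4096);
		if (!shader_obj)
			return -1;
	}
	/* ... map, copy shaders, unmap: runs on every call ... */
	return 0;
}

int main(void)
{
	printf("first=%d resume=%d\n", blit_init(), blit_init());
	free(shader_obj);
	return 0;
}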
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b50b15c7049..d3bd098e4e1 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -88,6 +88,10 @@
#define CONFIG_MEMSIZE 0x5428
#define CP_COHER_BASE 0x85F8
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b7bf18e4021..9945d86d900 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -850,11 +850,20 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
+
+ if (ring->rptr_save_reg) {
+ uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ }
+
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -981,16 +990,41 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
+ static const int ridx[] = {
+ RADEON_RING_TYPE_GFX_INDEX,
+ CAYMAN_RING_TYPE_CP1_INDEX,
+ CAYMAN_RING_TYPE_CP2_INDEX
+ };
+ static const unsigned cp_rb_cntl[] = {
+ CP_RB0_CNTL,
+ CP_RB1_CNTL,
+ CP_RB2_CNTL,
+ };
+ static const unsigned cp_rb_rptr_addr[] = {
+ CP_RB0_RPTR_ADDR,
+ CP_RB1_RPTR_ADDR,
+ CP_RB2_RPTR_ADDR
+ };
+ static const unsigned cp_rb_rptr_addr_hi[] = {
+ CP_RB0_RPTR_ADDR_HI,
+ CP_RB1_RPTR_ADDR_HI,
+ CP_RB2_RPTR_ADDR_HI
+ };
+ static const unsigned cp_rb_base[] = {
+ CP_RB0_BASE,
+ CP_RB1_BASE,
+ CP_RB2_BASE
+ };
struct radeon_ring *ring;
- u32 tmp;
- u32 rb_bufsz;
- int r;
+ int i, r;
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
@@ -1012,91 +1046,47 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27));
- /* ring 0 - compute and gfx */
- /* Set ring buffer size */
- ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
-#ifdef __BIG_ENDIAN
- tmp |= BUF_SWAP_32BIT;
-#endif
- WREG32(CP_RB0_CNTL, tmp);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB0_WPTR, ring->wptr);
-
/* set the wb address whether it's enabled or not */
- WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
- WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+ WREG32(SCRATCH_UMSK, 0xff);
- if (rdev->wb.enabled)
- WREG32(SCRATCH_UMSK, 0xff);
- else {
- tmp |= RB_NO_UPDATE;
- WREG32(SCRATCH_UMSK, 0);
- }
+ for (i = 0; i < 3; ++i) {
+ uint32_t rb_cntl;
+ uint64_t addr;
- mdelay(1);
- WREG32(CP_RB0_CNTL, tmp);
-
- WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
-
- ring->rptr = RREG32(CP_RB0_RPTR);
-
- /* ring1 - compute only */
- /* Set ring buffer size */
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ /* Set ring buffer size */
+ ring = &rdev->ring[ridx[i]];
+ rb_cntl = drm_order(ring->ring_size / 8);
+ rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
- tmp |= BUF_SWAP_32BIT;
+ rb_cntl |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB1_CNTL, tmp);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB1_WPTR, ring->wptr);
-
- /* set the wb address wether it's enabled or not */
- WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
- WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
-
- mdelay(1);
- WREG32(CP_RB1_CNTL, tmp);
+ WREG32(cp_rb_cntl[i], rb_cntl);
- WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
-
- ring->rptr = RREG32(CP_RB1_RPTR);
-
- /* ring2 - compute only */
- /* Set ring buffer size */
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
-#ifdef __BIG_ENDIAN
- tmp |= BUF_SWAP_32BIT;
-#endif
- WREG32(CP_RB2_CNTL, tmp);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB2_WPTR, ring->wptr);
+ /* set the wb address whether it's enabled or not */
+ addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
+ WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
+ WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
+ }
- /* set the wb address wether it's enabled or not */
- WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
- WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+ /* set the rb base addr, this causes an internal reset of ALL rings */
+ for (i = 0; i < 3; ++i) {
+ ring = &rdev->ring[ridx[i]];
+ WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
+ }
- mdelay(1);
- WREG32(CP_RB2_CNTL, tmp);
+ for (i = 0; i < 3; ++i) {
+ /* Initialize the ring buffer's read and write pointers */
+ ring = &rdev->ring[ridx[i]];
+ WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
- WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+ ring->rptr = ring->wptr = 0;
+ WREG32(ring->rptr_reg, ring->rptr);
+ WREG32(ring->wptr_reg, ring->wptr);
- ring->rptr = RREG32(CP_RB2_RPTR);
+ mdelay(1);
+ WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
+ }
/* start the rings */
cayman_cp_start(rdev);
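cayman_cp_resume() collapses three copy-pasted per-ring blocks into parallel register tables plus loops, and the new comment notes that the base-address write resets all rings, which is why the writes are grouped by register rather than by ring. The table-driven shape, with placeholder register numbers (not the real offsets):

#include <stdio.h>

static const unsigned rb_cntl[] = { 0x1000, 0x1008, 0x1010 };	/* placeholders */
static const unsigned rb_base[] = { 0x1004, 0x100c, 0x1014 };

static void wreg32(unsigned reg, unsigned val)
{
	printf("WREG32(0x%04x, 0x%08x)\n", reg, val);
}

int main(void)
{
	unsigned gpu_addr[] = { 0x100000, 0x200000, 0x300000 };

	for (int i = 0; i < 3; i++)
		wreg32(rb_cntl[i], 0x8);		/* size/config per ring */
	for (int i = 0; i < 3; i++)			/* grouped: resets all rings at once */
		wreg32(rb_base[i], gpu_addr[i] >> 8);
	return 0;
}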
@@ -1132,6 +1122,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1180,6 +1178,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
@@ -1291,17 +1297,17 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
- r = radeon_vm_manager_start(rdev);
- if (r)
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r)
@@ -1334,10 +1340,6 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- /* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- radeon_vm_manager_suspend(rdev);
- r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
@@ -1413,17 +1415,7 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
- r = radeon_vm_manager_init(rdev);
- if (r) {
- dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
- }
-
r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
@@ -1432,7 +1424,7 @@ int cayman_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
@@ -1463,7 +1455,7 @@ void cayman_fini(struct radeon_device *rdev)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index a0b98066e20..870db340d37 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -236,6 +236,10 @@
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_COHER_CNTL2 0x85E8
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fb44e7e4908..8acb34fd3fd 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,19 @@ MODULE_FIRMWARE(FIRMWARE_R520);
#include "r100_track.h"
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * and others in some cases.
+ */
+
+/**
+ * r100_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r1xx-r4xx).
+ */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
@@ -99,128 +112,49 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
-/* This files gather functions specifics to:
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+/**
+ * r100_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (r1xx-r4xx).
+ * Enables the pageflip irq (vblank irq).
*/
-
-int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx,
- unsigned reg)
-{
- int r;
- u32 tile_flags = 0;
- u32 tmp;
- struct radeon_cs_reloc *reloc;
- u32 value;
-
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
-
- value = radeon_get_ib_value(p, idx);
- tmp = value & 0x003fffff;
- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- tile_flags |= RADEON_DST_TILE_MICRO;
- }
-
- tmp |= tile_flags;
- p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
- } else
- p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
- return 0;
-}
-
-int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- int idx)
-{
- unsigned c, i;
- struct radeon_cs_reloc *reloc;
- struct r100_cs_track *track;
- int r = 0;
- volatile uint32_t *ib;
- u32 idx_value;
-
- ib = p->ib.ptr;
- track = (struct r100_cs_track *)p->track;
- c = radeon_get_ib_value(p, idx++) & 0x1F;
- if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- track->num_arrays = c;
- for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
-
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 1].robj = reloc->robj;
- track->arrays[i + 1].esize = idx_value >> 24;
- track->arrays[i + 1].esize &= 0x7F;
- }
- if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].esize &= 0x7F;
- }
- return r;
-}
-
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
+/**
+ * r100_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (r1xx-r4xx).
+ * Disables the pageflip irq (vblank irq).
+ */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; once it does, we release the lock and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -247,6 +181,15 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -329,6 +272,15 @@ void r100_pm_get_dynpm_state(struct radeon_device *rdev)
pcie_lanes);
}
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
void r100_pm_init_profile(struct radeon_device *rdev)
{
/* default */
@@ -368,6 +320,14 @@ void r100_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
void r100_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
@@ -459,6 +419,13 @@ void r100_pm_misc(struct radeon_device *rdev)
}
}
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
void r100_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -483,6 +450,13 @@ void r100_pm_prepare(struct radeon_device *rdev)
}
}
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
void r100_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -507,6 +481,14 @@ void r100_pm_finish(struct radeon_device *rdev)
}
}
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Checks if the GUI (2D/3D engines) is idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
bool r100_gui_idle(struct radeon_device *rdev)
{
if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
@@ -516,6 +498,15 @@ bool r100_gui_idle(struct radeon_device *rdev)
}
/* hpd for digital panel detect/disconnect */
+/**
+ * r100_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (r1xx-r4xx).
+ * Returns true if connected, false if not connected.
+ */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
@@ -535,6 +526,14 @@ bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
return connected;
}
+/**
+ * r100_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (r1xx-r4xx).
+ */
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
@@ -563,47 +562,47 @@ void r100_hpd_set_polarity(struct radeon_device *rdev,
}
}
+/**
+ * r100_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (r1xx-r4xx).
+ * Set the polarity, and enable the hpd interrupts.
+ */
void r100_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- switch (radeon_connector->hpd.hpd) {
- case RADEON_HPD_1:
- rdev->irq.hpd[0] = true;
- break;
- case RADEON_HPD_2:
- rdev->irq.hpd[1] = true;
- break;
- default:
- break;
- }
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- r100_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
+/**
+ * r100_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (r1xx-r4xx).
+ * Disable the hpd interrupts.
+ */
void r100_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- switch (radeon_connector->hpd.hpd) {
- case RADEON_HPD_1:
- rdev->irq.hpd[0] = false;
- break;
- case RADEON_HPD_2:
- rdev->irq.hpd[1] = false;
- break;
- default:
- break;
- }
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
@@ -635,15 +634,6 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev);
}
-/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
-void r100_enable_bm(struct radeon_device *rdev)
-{
- uint32_t tmp;
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
-}
-
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -705,18 +695,18 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -782,7 +772,6 @@ int r100_irq_process(struct radeon_device *rdev)
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
rdev->irq.gui_idle_acked = true;
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
@@ -792,7 +781,7 @@ int r100_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
@@ -801,7 +790,7 @@ int r100_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
@@ -883,7 +872,7 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
@@ -947,7 +936,7 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
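r100_copy_blit() now takes struct radeon_fence ** and radeon_fence_emit() takes an explicit ring index. Under that signature a caller would look roughly like the fragment below (a sketch only, assuming the usual radeon fence helpers; error handling elided):

	struct radeon_fence *fence = NULL;
	int r;

	r = r100_copy_blit(rdev, src_offset, dst_offset, num_gpu_pages, &fence);
	if (!r)
		r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);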
@@ -1192,6 +1181,14 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
+ }
return 0;
}
@@ -1202,6 +1199,7 @@ void r100_cp_fini(struct radeon_device *rdev)
}
/* Disable ring */
r100_cp_disable(rdev);
+ radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
@@ -1223,6 +1221,112 @@ void r100_cp_disable(struct radeon_device *rdev)
/*
* CS functions
*/
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg)
+{
+ int r;
+ u32 tile_flags = 0;
+ u32 tmp;
+ struct radeon_cs_reloc *reloc;
+ u32 value;
+
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ value = radeon_get_ib_value(p, idx);
+ tmp = value & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_DST_TILE_MACRO;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reg == RADEON_SRC_PITCH_OFFSET) {
+ DRM_ERROR("Cannot src blit from microtiled surface\n");
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ tile_flags |= RADEON_DST_TILE_MICRO;
+ }
+
+ tmp |= tile_flags;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
+ } else
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
+ return 0;
+}
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx)
+{
+ unsigned c, i;
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ int r = 0;
+ volatile uint32_t *ib;
+ u32 idx_value;
+
+ ib = p->ib.ptr;
+ track = (struct r100_cs_track *)p->track;
+ c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+ DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ track->num_arrays = c;
+ for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize &= 0x7F;
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 1].robj = reloc->robj;
+ track->arrays[i + 1].esize = idx_value >> 24;
+ track->arrays[i + 1].esize &= 0x7F;
+ }
+ if (c & 1) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].esize &= 0x7F;
+ }
+ return r;
+}
+
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
@@ -2048,6 +2152,379 @@ int r100_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+{
+ DRM_ERROR("pitch %d\n", t->pitch);
+ DRM_ERROR("use_pitch %d\n", t->use_pitch);
+ DRM_ERROR("width %d\n", t->width);
+ DRM_ERROR("width_11 %d\n", t->width_11);
+ DRM_ERROR("height %d\n", t->height);
+ DRM_ERROR("height_11 %d\n", t->height_11);
+ DRM_ERROR("num levels %d\n", t->num_levels);
+ DRM_ERROR("depth %d\n", t->txdepth);
+ DRM_ERROR("bpp %d\n", t->cpp);
+ DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
+ DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
+ DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+ DRM_ERROR("compress format %d\n", t->compress_format);
+}
+
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+ int block_width, block_height, block_bytes;
+ int wblocks, hblocks;
+ int min_wblocks;
+ int sz;
+
+ block_width = 4;
+ block_height = 4;
+
+ switch (compress_format) {
+ case R100_TRACK_COMP_DXT1:
+ block_bytes = 8;
+ min_wblocks = 4;
+ break;
+ default:
+ case R100_TRACK_COMP_DXT35:
+ block_bytes = 16;
+ min_wblocks = 2;
+ break;
+ }
+
+ hblocks = (h + block_height - 1) / block_height;
+ wblocks = (w + block_width - 1) / block_width;
+ if (wblocks < min_wblocks)
+ wblocks = min_wblocks;
+ sz = wblocks * hblocks * block_bytes;
+ return sz;
+}
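Worked example of r100_track_compress_size() for a 13x5 DXT1 mip level (4x4 blocks, 8 bytes each, at least 4 blocks per row for DXT1):

#include <stdio.h>

int main(void)
{
	int w = 13, h = 5;
	int hblocks = (h + 3) / 4;	/* ceil(5/4)  = 2 */
	int wblocks = (w + 3) / 4;	/* ceil(13/4) = 4 */
	if (wblocks < 4)		/* DXT1 min_wblocks */
		wblocks = 4;
	printf("%d bytes\n", wblocks * hblocks * 8);	/* 64 */
	return 0;
}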
+
+static int r100_cs_track_cube(struct radeon_device *rdev,
+ struct r100_cs_track *track, unsigned idx)
+{
+ unsigned face, w, h;
+ struct radeon_bo *cube_robj;
+ unsigned long size;
+ unsigned compress_format = track->textures[idx].compress_format;
+
+ for (face = 0; face < 5; face++) {
+ cube_robj = track->textures[idx].cube_info[face].robj;
+ w = track->textures[idx].cube_info[face].width;
+ h = track->textures[idx].cube_info[face].height;
+
+ if (compress_format) {
+ size = r100_track_compress_size(compress_format, w, h);
+ } else
+ size = w * h;
+ size *= track->textures[idx].cpp;
+
+ size += track->textures[idx].cube_info[face].offset;
+
+ if (size > radeon_bo_size(cube_robj)) {
+ DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
+ r100_cs_track_texture_print(&track->textures[idx]);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int r100_cs_track_texture_check(struct radeon_device *rdev,
+ struct r100_cs_track *track)
+{
+ struct radeon_bo *robj;
+ unsigned long size;
+ unsigned u, i, w, h, d;
+ int ret;
+
+ for (u = 0; u < track->num_texture; u++) {
+ if (!track->textures[u].enabled)
+ continue;
+ if (track->textures[u].lookup_disable)
+ continue;
+ robj = track->textures[u].robj;
+ if (robj == NULL) {
+ DRM_ERROR("No texture bound to unit %u\n", u);
+ return -EINVAL;
+ }
+ size = 0;
+ for (i = 0; i <= track->textures[u].num_levels; i++) {
+ if (track->textures[u].use_pitch) {
+ if (rdev->family < CHIP_R300)
+ w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
+ else
+ w = track->textures[u].pitch / (1 << i);
+ } else {
+ w = track->textures[u].width;
+ if (rdev->family >= CHIP_RV515)
+ w |= track->textures[u].width_11;
+ w = w / (1 << i);
+ if (track->textures[u].roundup_w)
+ w = roundup_pow_of_two(w);
+ }
+ h = track->textures[u].height;
+ if (rdev->family >= CHIP_RV515)
+ h |= track->textures[u].height_11;
+ h = h / (1 << i);
+ if (track->textures[u].roundup_h)
+ h = roundup_pow_of_two(h);
+ if (track->textures[u].tex_coord_type == 1) {
+ d = (1 << track->textures[u].txdepth) / (1 << i);
+ if (!d)
+ d = 1;
+ } else {
+ d = 1;
+ }
+ if (track->textures[u].compress_format) {
+
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
+ /* compressed textures are block based */
+ } else
+ size += w * h * d;
+ }
+ size *= track->textures[u].cpp;
+
+ switch (track->textures[u].tex_coord_type) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ if (track->separate_cube) {
+ ret = r100_cs_track_cube(rdev, track, u);
+ if (ret)
+ return ret;
+ } else
+ size *= 6;
+ break;
+ default:
+ DRM_ERROR("Invalid texture coordinate type %u for unit "
+ "%u\n", track->textures[u].tex_coord_type, u);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(robj)) {
+ DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+ "%lu\n", u, size, radeon_bo_size(robj));
+ r100_cs_track_texture_print(&track->textures[u]);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+ unsigned i;
+ unsigned long size;
+ unsigned prim_walk;
+ unsigned nverts;
+ unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+
+ if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+ !track->blend_read_enable)
+ num_cb = 0;
+
+ for (i = 0; i < num_cb; i++) {
+ if (track->cb[i].robj == NULL) {
+ DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ return -EINVAL;
+ }
+ size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+ size += track->cb[i].offset;
+ if (size > radeon_bo_size(track->cb[i].robj)) {
+ DRM_ERROR("[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->cb[i].robj));
+ DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->cb_dirty = false;
+
+ if (track->zb_dirty && track->z_enabled) {
+ if (track->zb.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for z buffer !\n");
+ return -EINVAL;
+ }
+ size = track->zb.pitch * track->zb.cpp * track->maxy;
+ size += track->zb.offset;
+ if (size > radeon_bo_size(track->zb.robj)) {
+ DRM_ERROR("[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_bo_size(track->zb.robj));
+ DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+ track->zb.pitch, track->zb.cpp,
+ track->zb.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->zb_dirty = false;
+
+ if (track->aa_dirty && track->aaresolve) {
+ if (track->aa.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ return -EINVAL;
+ }
+ /* I believe the format comes from colorbuffer0. */
+ size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+ size += track->aa.offset;
+ if (size > radeon_bo_size(track->aa.robj)) {
+ DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->aa_dirty = false;
+
+ prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
+ if (track->vap_vf_cntl & (1 << 14)) {
+ nverts = track->vap_alt_nverts;
+ } else {
+ nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ }
+ switch (prim_walk) {
+ case 1:
+ for (i = 0; i < track->num_arrays; i++) {
+ size = track->arrays[i].esize * track->max_indx * 4;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ DRM_ERROR("Max indices %u\n", track->max_indx);
+ return -EINVAL;
+ }
+ }
+ break;
+ case 2:
+ for (i = 0; i < track->num_arrays; i++) {
+ size = track->arrays[i].esize * (nverts - 1) * 4;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ return -EINVAL;
+ }
+ }
+ break;
+ case 3:
+ size = track->vtx_size * nverts;
+ if (size != track->immd_dwords) {
+ DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
+ track->immd_dwords, size);
+ DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+ nverts, track->vtx_size);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+ prim_walk);
+ return -EINVAL;
+ }
+
+ if (track->tex_dirty) {
+ track->tex_dirty = false;
+ return r100_cs_track_texture_check(rdev, track);
+ }
+ return 0;
+}
+
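r100_cs_track_check() above pulls three fields out of VAP_VF_CNTL: bits 5:4 select the primitive walk mode, bit 14 redirects the vertex count to the alternate register, and bits 31:16 carry the count inline. Restated as a sketch (the mode names are inferred from the switch cases above, not from a register spec):

    u32 v = track->vap_vf_cntl;
    unsigned prim_walk = (v >> 4) & 0x3;   /* 1: indexed, 2: vertex list, 3: immediate */
    unsigned nverts = (v & (1 << 14)) ? track->vap_alt_nverts
                                      : (v >> 16) & 0xFFFF;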
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+ unsigned i, face;
+
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ track->tex_dirty = true;
+ track->aa_dirty = true;
+
+ if (rdev->family < CHIP_R300) {
+ track->num_cb = 1;
+ if (rdev->family <= CHIP_RS200)
+ track->num_texture = 3;
+ else
+ track->num_texture = 6;
+ track->maxy = 2048;
+ track->separate_cube = 1;
+ } else {
+ track->num_cb = 4;
+ track->num_texture = 16;
+ track->maxy = 4096;
+ track->separate_cube = 0;
+ track->aaresolve = false;
+ track->aa.robj = NULL;
+ }
+
+ for (i = 0; i < track->num_cb; i++) {
+ track->cb[i].robj = NULL;
+ track->cb[i].pitch = 8192;
+ track->cb[i].cpp = 16;
+ track->cb[i].offset = 0;
+ }
+ track->z_enabled = true;
+ track->zb.robj = NULL;
+ track->zb.pitch = 8192;
+ track->zb.cpp = 4;
+ track->zb.offset = 0;
+ track->vtx_size = 0x7F;
+ track->immd_dwords = 0xFFFFFFFFUL;
+ track->num_arrays = 11;
+ track->max_indx = 0x00FFFFFFUL;
+ for (i = 0; i < track->num_arrays; i++) {
+ track->arrays[i].robj = NULL;
+ track->arrays[i].esize = 0x7F;
+ }
+ for (i = 0; i < track->num_texture; i++) {
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ track->textures[i].pitch = 16536;
+ track->textures[i].width = 16536;
+ track->textures[i].height = 16536;
+ track->textures[i].width_11 = 1 << 11;
+ track->textures[i].height_11 = 1 << 11;
+ track->textures[i].num_levels = 12;
+ if (rdev->family <= CHIP_RS200) {
+ track->textures[i].tex_coord_type = 0;
+ track->textures[i].txdepth = 0;
+ } else {
+ track->textures[i].txdepth = 16;
+ track->textures[i].tex_coord_type = 1;
+ }
+ track->textures[i].cpp = 64;
+ track->textures[i].robj = NULL;
+ /* CS IB emission code makes sure texture units are disabled */
+ track->textures[i].enabled = false;
+ track->textures[i].lookup_disable = false;
+ track->textures[i].roundup_w = true;
+ track->textures[i].roundup_h = true;
+ if (track->separate_cube)
+ for (face = 0; face < 5; face++) {
+ track->textures[i].cube_info[face].robj = NULL;
+ track->textures[i].cube_info[face].width = 16536;
+ track->textures[i].cube_info[face].height = 16536;
+ track->textures[i].cube_info[face].offset = 0;
+ }
+ }
+}
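r100_cs_track_clear() deliberately seeds pessimistic defaults (8192-pixel pitches, 16-byte pixels, 16536-texel texture dimensions) so that any state a command stream fails to program errs toward failing the size checks rather than passing them. The intended flow around it, sketched (the parser step is paraphrased, not quoted):

    r100_cs_track_clear(rdev, track);       /* worst-case defaults */
    /* ... CS parser overwrites track state from validated packets ... */
    r = r100_cs_track_check(rdev, track);   /* reject BOs too small for that state */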
/*
* Global GPU functions
@@ -2175,6 +2652,15 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
void r100_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
@@ -3261,380 +3747,6 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
-static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
-{
- DRM_ERROR("pitch %d\n", t->pitch);
- DRM_ERROR("use_pitch %d\n", t->use_pitch);
- DRM_ERROR("width %d\n", t->width);
- DRM_ERROR("width_11 %d\n", t->width_11);
- DRM_ERROR("height %d\n", t->height);
- DRM_ERROR("height_11 %d\n", t->height_11);
- DRM_ERROR("num levels %d\n", t->num_levels);
- DRM_ERROR("depth %d\n", t->txdepth);
- DRM_ERROR("bpp %d\n", t->cpp);
- DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
- DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
- DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
- DRM_ERROR("compress format %d\n", t->compress_format);
-}
-
-static int r100_track_compress_size(int compress_format, int w, int h)
-{
- int block_width, block_height, block_bytes;
- int wblocks, hblocks;
- int min_wblocks;
- int sz;
-
- block_width = 4;
- block_height = 4;
-
- switch (compress_format) {
- case R100_TRACK_COMP_DXT1:
- block_bytes = 8;
- min_wblocks = 4;
- break;
- default:
- case R100_TRACK_COMP_DXT35:
- block_bytes = 16;
- min_wblocks = 2;
- break;
- }
-
- hblocks = (h + block_height - 1) / block_height;
- wblocks = (w + block_width - 1) / block_width;
- if (wblocks < min_wblocks)
- wblocks = min_wblocks;
- sz = wblocks * hblocks * block_bytes;
- return sz;
-}
-
-static int r100_cs_track_cube(struct radeon_device *rdev,
- struct r100_cs_track *track, unsigned idx)
-{
- unsigned face, w, h;
- struct radeon_bo *cube_robj;
- unsigned long size;
- unsigned compress_format = track->textures[idx].compress_format;
-
- for (face = 0; face < 5; face++) {
- cube_robj = track->textures[idx].cube_info[face].robj;
- w = track->textures[idx].cube_info[face].width;
- h = track->textures[idx].cube_info[face].height;
-
- if (compress_format) {
- size = r100_track_compress_size(compress_format, w, h);
- } else
- size = w * h;
- size *= track->textures[idx].cpp;
-
- size += track->textures[idx].cube_info[face].offset;
-
- if (size > radeon_bo_size(cube_robj)) {
- DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_bo_size(cube_robj));
- r100_cs_track_texture_print(&track->textures[idx]);
- return -1;
- }
- }
- return 0;
-}
-
-static int r100_cs_track_texture_check(struct radeon_device *rdev,
- struct r100_cs_track *track)
-{
- struct radeon_bo *robj;
- unsigned long size;
- unsigned u, i, w, h, d;
- int ret;
-
- for (u = 0; u < track->num_texture; u++) {
- if (!track->textures[u].enabled)
- continue;
- if (track->textures[u].lookup_disable)
- continue;
- robj = track->textures[u].robj;
- if (robj == NULL) {
- DRM_ERROR("No texture bound to unit %u\n", u);
- return -EINVAL;
- }
- size = 0;
- for (i = 0; i <= track->textures[u].num_levels; i++) {
- if (track->textures[u].use_pitch) {
- if (rdev->family < CHIP_R300)
- w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
- else
- w = track->textures[u].pitch / (1 << i);
- } else {
- w = track->textures[u].width;
- if (rdev->family >= CHIP_RV515)
- w |= track->textures[u].width_11;
- w = w / (1 << i);
- if (track->textures[u].roundup_w)
- w = roundup_pow_of_two(w);
- }
- h = track->textures[u].height;
- if (rdev->family >= CHIP_RV515)
- h |= track->textures[u].height_11;
- h = h / (1 << i);
- if (track->textures[u].roundup_h)
- h = roundup_pow_of_two(h);
- if (track->textures[u].tex_coord_type == 1) {
- d = (1 << track->textures[u].txdepth) / (1 << i);
- if (!d)
- d = 1;
- } else {
- d = 1;
- }
- if (track->textures[u].compress_format) {
-
- size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
- /* compressed textures are block based */
- } else
- size += w * h * d;
- }
- size *= track->textures[u].cpp;
-
- switch (track->textures[u].tex_coord_type) {
- case 0:
- case 1:
- break;
- case 2:
- if (track->separate_cube) {
- ret = r100_cs_track_cube(rdev, track, u);
- if (ret)
- return ret;
- } else
- size *= 6;
- break;
- default:
- DRM_ERROR("Invalid texture coordinate type %u for unit "
- "%u\n", track->textures[u].tex_coord_type, u);
- return -EINVAL;
- }
- if (size > radeon_bo_size(robj)) {
- DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_bo_size(robj));
- r100_cs_track_texture_print(&track->textures[u]);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
-{
- unsigned i;
- unsigned long size;
- unsigned prim_walk;
- unsigned nverts;
- unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
-
- if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
- !track->blend_read_enable)
- num_cb = 0;
-
- for (i = 0; i < num_cb; i++) {
- if (track->cb[i].robj == NULL) {
- DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
- return -EINVAL;
- }
- size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
- size += track->cb[i].offset;
- if (size > radeon_bo_size(track->cb[i].robj)) {
- DRM_ERROR("[drm] Buffer too small for color buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->cb[i].robj));
- DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
- i, track->cb[i].pitch, track->cb[i].cpp,
- track->cb[i].offset, track->maxy);
- return -EINVAL;
- }
- }
- track->cb_dirty = false;
-
- if (track->zb_dirty && track->z_enabled) {
- if (track->zb.robj == NULL) {
- DRM_ERROR("[drm] No buffer for z buffer !\n");
- return -EINVAL;
- }
- size = track->zb.pitch * track->zb.cpp * track->maxy;
- size += track->zb.offset;
- if (size > radeon_bo_size(track->zb.robj)) {
- DRM_ERROR("[drm] Buffer too small for z buffer "
- "(need %lu have %lu) !\n", size,
- radeon_bo_size(track->zb.robj));
- DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
- track->zb.pitch, track->zb.cpp,
- track->zb.offset, track->maxy);
- return -EINVAL;
- }
- }
- track->zb_dirty = false;
-
- if (track->aa_dirty && track->aaresolve) {
- if (track->aa.robj == NULL) {
- DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
- return -EINVAL;
- }
- /* I believe the format comes from colorbuffer0. */
- size = track->aa.pitch * track->cb[0].cpp * track->maxy;
- size += track->aa.offset;
- if (size > radeon_bo_size(track->aa.robj)) {
- DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->aa.robj));
- DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
- i, track->aa.pitch, track->cb[0].cpp,
- track->aa.offset, track->maxy);
- return -EINVAL;
- }
- }
- track->aa_dirty = false;
-
- prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
- if (track->vap_vf_cntl & (1 << 14)) {
- nverts = track->vap_alt_nverts;
- } else {
- nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
- }
- switch (prim_walk) {
- case 1:
- for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * track->max_indx * 4;
- if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
- return -EINVAL;
- }
- if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- DRM_ERROR("Max indices %u\n", track->max_indx);
- return -EINVAL;
- }
- }
- break;
- case 2:
- for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * (nverts - 1) * 4;
- if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
- return -EINVAL;
- }
- if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- return -EINVAL;
- }
- }
- break;
- case 3:
- size = track->vtx_size * nverts;
- if (size != track->immd_dwords) {
- DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
- track->immd_dwords, size);
- DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
- nverts, track->vtx_size);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
- prim_walk);
- return -EINVAL;
- }
-
- if (track->tex_dirty) {
- track->tex_dirty = false;
- return r100_cs_track_texture_check(rdev, track);
- }
- return 0;
-}
-
-void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
-{
- unsigned i, face;
-
- track->cb_dirty = true;
- track->zb_dirty = true;
- track->tex_dirty = true;
- track->aa_dirty = true;
-
- if (rdev->family < CHIP_R300) {
- track->num_cb = 1;
- if (rdev->family <= CHIP_RS200)
- track->num_texture = 3;
- else
- track->num_texture = 6;
- track->maxy = 2048;
- track->separate_cube = 1;
- } else {
- track->num_cb = 4;
- track->num_texture = 16;
- track->maxy = 4096;
- track->separate_cube = 0;
- track->aaresolve = false;
- track->aa.robj = NULL;
- }
-
- for (i = 0; i < track->num_cb; i++) {
- track->cb[i].robj = NULL;
- track->cb[i].pitch = 8192;
- track->cb[i].cpp = 16;
- track->cb[i].offset = 0;
- }
- track->z_enabled = true;
- track->zb.robj = NULL;
- track->zb.pitch = 8192;
- track->zb.cpp = 4;
- track->zb.offset = 0;
- track->vtx_size = 0x7F;
- track->immd_dwords = 0xFFFFFFFFUL;
- track->num_arrays = 11;
- track->max_indx = 0x00FFFFFFUL;
- for (i = 0; i < track->num_arrays; i++) {
- track->arrays[i].robj = NULL;
- track->arrays[i].esize = 0x7F;
- }
- for (i = 0; i < track->num_texture; i++) {
- track->textures[i].compress_format = R100_TRACK_COMP_NONE;
- track->textures[i].pitch = 16536;
- track->textures[i].width = 16536;
- track->textures[i].height = 16536;
- track->textures[i].width_11 = 1 << 11;
- track->textures[i].height_11 = 1 << 11;
- track->textures[i].num_levels = 12;
- if (rdev->family <= CHIP_RS200) {
- track->textures[i].tex_coord_type = 0;
- track->textures[i].txdepth = 0;
- } else {
- track->textures[i].txdepth = 16;
- track->textures[i].tex_coord_type = 1;
- }
- track->textures[i].cpp = 64;
- track->textures[i].robj = NULL;
- /* CS IB emission code makes sure texture unit are disabled */
- track->textures[i].enabled = false;
- track->textures[i].lookup_disable = false;
- track->textures[i].roundup_w = true;
- track->textures[i].roundup_h = true;
- if (track->separate_cube)
- for (face = 0; face < 5; face++) {
- track->textures[i].cube_info[face].robj = NULL;
- track->textures[i].cube_info[face].width = 16536;
- track->textures[i].cube_info[face].height = 16536;
- track->textures[i].cube_info[face].offset = 0;
- }
- }
-}
-
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
@@ -3679,6 +3791,12 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ if (ring->rptr_save_reg) {
+ u32 next_rptr = ring->wptr + 2 + 3;
+ radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
+ radeon_ring_write(ring, next_rptr);
+ }
+
radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(ring, ib->gpu_addr);
radeon_ring_write(ring, ib->length_dw);
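The ring->wptr + 2 + 3 computation above is plain dword accounting for what this function emits: the scratch write is two dwords (PACKET0 header plus value) and the IB dispatch is three (header, base address, length), so the saved value is the read pointer the CP reaches once this submission is consumed. In code form:

    next_rptr = ring->wptr
              + 2     /* PACKET0(rptr_save_reg, 0) + payload */
              + 3;    /* PACKET0(RADEON_CP_IB_BASE, 1) + addr + len */

The r600 variant later in this diff does the same bookkeeping with its three- and four-dword PACKET3 forms.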
@@ -3711,7 +3829,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
- r = radeon_ib_schedule(rdev, &ib);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
@@ -3740,12 +3858,6 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
-void r100_ib_fini(struct radeon_device *rdev)
-{
- radeon_ib_pool_suspend(rdev);
- radeon_ib_pool_fini(rdev);
-}
-
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shutdown CP we shouldn't need to do that but better be safe than
@@ -3905,13 +4017,11 @@ static int r100_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -3948,7 +4058,6 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -3961,7 +4070,7 @@ void r100_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
@@ -4068,20 +4177,14 @@ int r100_init(struct radeon_device *rdev)
}
r100_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r100_startup(rdev);
if (r) {
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a26144d0120..f0889259eb0 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -85,7 +85,7 @@ int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
@@ -120,7 +120,7 @@ int r200_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 97722a33e51..646a1927dda 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1391,13 +1391,11 @@ static int r300_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -1436,7 +1434,6 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1451,7 +1448,7 @@ void r300_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
@@ -1538,20 +1535,14 @@ int r300_init(struct radeon_device *rdev)
}
r300_set_reg_safe(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r300_startup(rdev);
if (r) {
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 99137be7a30..f2f5bf6d339 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -275,13 +275,11 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_cp_errata_init(rdev);
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -324,7 +322,6 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -340,7 +337,7 @@ void r420_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
@@ -438,20 +435,14 @@ int r420_init(struct radeon_device *rdev)
}
r420_set_reg_safe(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r420_startup(rdev);
if (r) {
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index b5cf8375cd2..079d3c52c08 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -203,13 +203,11 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -311,20 +309,14 @@ int r520_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r520_startup(rdev);
if (r) {
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bff62729381..637280f541a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -709,6 +709,7 @@ void r600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -729,28 +730,22 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
- rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
- rdev->irq.hpd[3] = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
- rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
- rdev->irq.hpd[5] = true;
break;
default:
break;
@@ -759,85 +754,73 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[2] = true;
break;
default:
break;
}
}
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- r600_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
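Instead of toggling per-pin rdev->irq.hpd[] flags and re-running the whole irq_set path, each connector now contributes one bit to a mask that is applied in a single radeon_irq_kms_enable_hpd() call (mirrored by the disable mask in r600_hpd_fini() below). Illustrative use, assuming the usual enum numbering where RADEON_HPD_1 is 0:

    unsigned enable = 0;
    enable |= 1 << RADEON_HPD_1;    /* one bit per hpd pin */
    enable |= 1 << RADEON_HPD_3;
    radeon_irq_kms_enable_hpd(rdev, enable);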
void r600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
- if (ASIC_IS_DCE3(rdev)) {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (ASIC_IS_DCE3(rdev)) {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
- rdev->irq.hpd[3] = false;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
- rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
- rdev->irq.hpd[5] = false;
break;
default:
break;
}
- }
- } else {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ } else {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
default:
break;
}
}
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
@@ -1306,6 +1289,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
@@ -1349,6 +1340,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
rv515_mc_resume(rdev, &save);
return 0;
}
@@ -2172,18 +2171,29 @@ int r600_cp_resume(struct radeon_device *rdev)
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;
+ int r;
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
+
+ if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
+ }
}
void r600_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
@@ -2206,7 +2216,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i, ridx = radeon_ring_index(rdev, ring);
+ unsigned i;
int r;
r = radeon_scratch_get(rdev, &scratch);
@@ -2217,7 +2227,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, ring, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
@@ -2232,10 +2242,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ridx, scratch, tmp);
+ ring->idx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
@@ -2309,34 +2319,21 @@ int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
+ struct radeon_semaphore *sem = NULL;
struct radeon_sa_bo *vb = NULL;
int r;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
if (r) {
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb);
+ r600_blit_done_copy(rdev, fence, vb, sem);
return 0;
}
-void r600_blit_suspend(struct radeon_device *rdev)
-{
- int r;
-
- /* unpin shaders bo */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- }
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2419,13 +2416,11 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -2475,9 +2470,6 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
- /* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
@@ -2559,20 +2551,14 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2588,7 +2574,7 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2607,9 +2593,24 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
+
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
+ }
- /* FIXME: implement */
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -2627,7 +2628,6 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
uint32_t tmp = 0;
unsigned i;
int r;
- int ring_index = radeon_ring_index(rdev, ring);
r = radeon_scratch_get(rdev, &scratch);
if (r) {
@@ -2635,7 +2635,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, ring_index, &ib, 256);
+ r = radeon_ib_get(rdev, ring->idx, &ib, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2644,7 +2644,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
@@ -2857,7 +2857,6 @@ void r600_disable_interrupts(struct radeon_device *rdev)
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
- rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
@@ -3042,18 +3041,18 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3309,7 +3308,6 @@ int r600_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
@@ -3321,24 +3319,21 @@ int r600_irq_process(struct radeon_device *rdev)
RREG32(IH_RB_WPTR);
wptr = r600_get_ih_wptr(rdev);
- rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
-
- spin_lock_irqsave(&rdev->ih.lock, flags);
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
- }
-restart_ih:
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
r600_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -3355,7 +3350,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -3381,7 +3376,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -3480,7 +3475,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -3492,17 +3486,19 @@ restart_ih:
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = r600_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
return IRQ_HANDLED;
}
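The spinlock around IH processing gives way to a try-lock built on atomic_xchg(): the first caller swaps the lock word from 0 to 1 and drains the ring, a concurrent caller sees 1 and returns IRQ_NONE, and the wptr re-read after release closes the window where new entries landed between the last drain and the atomic_set(). The handoff in miniature:

    if (atomic_xchg(&rdev->ih.lock, 1))     /* already owned: drop out */
            return IRQ_NONE;
    /* ... drain entries from rptr up to wptr ... */
    atomic_set(&rdev->ih.lock, 0);          /* release */
    if (r600_get_ih_wptr(rdev) != rptr)     /* raced? take it again */
            goto restart_ih;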
@@ -3685,6 +3681,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
+ u32 mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -3703,6 +3701,15 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (rdev->family <= CHIP_R600)
return;
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & DRM_PCIE_SPEED_50))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
/* 55 nm r6xx asics */
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 03b6e0d3d50..2bef8549ddf 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,7 +512,8 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
- rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -523,10 +524,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.max_dim = 8192;
- /* pin copy shader into vram if already initialized */
- if (rdev->r600_blit.shader_obj)
- goto done;
-
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -551,11 +548,26 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("r600 failed to allocate shader\n");
- return r;
+ /* pin copy shader into vram if not already initialized */
+ if (rdev->r600_blit.shader_obj == NULL) {
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("r600 failed to allocate shader\n");
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
}
DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
@@ -586,17 +598,6 @@ int r600_blit_init(struct radeon_device *rdev)
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-done:
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -666,7 +667,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_sa_bo **vb)
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -689,34 +691,50 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
return r;
}
+ r = radeon_semaphore_create(rdev, sem);
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
+ return r;
+ }
+
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
+ radeon_semaphore_free(rdev, sem, NULL);
return r;
}
+ if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
+ radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
+ RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
+ } else {
+ radeon_semaphore_free(rdev, sem, NULL);
+ }
+
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
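With fences passed as struct radeon_fence ** and tagged with their ring, the blit path can now make the GFX ring wait for outstanding work on another ring before copying. The pattern, annotated with the signaler/waiter argument order from the new radeon_semaphore_sync_rings() prototype further down in this diff:

    if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
            radeon_semaphore_sync_rings(rdev, *sem,
                                        (*fence)->ring,              /* signaler */
                                        RADEON_RING_TYPE_GFX_INDEX); /* waiter */
            radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
    }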
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
- struct radeon_sa_bo *vb)
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return;
}
radeon_ring_unlock_commit(rdev, ring);
- radeon_sa_bo_free(rdev, &vb, fence);
+ radeon_sa_bo_free(rdev, &vb, *fence);
+ radeon_semaphore_free(rdev, &sem, *fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 82a0a4c919c..e3558c3ef24 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -519,8 +519,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.afmt[dig->afmt->id] = true;
- radeon_irq_set(rdev);
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
}
dig->afmt->enabled = true;
@@ -556,8 +555,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.afmt[dig->afmt->id] = false;
- radeon_irq_set(rdev);
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
/* Older chipsets not handled by AtomBIOS */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 025fd5b6c08..4b116ae75fc 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -153,6 +153,9 @@
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fefcca55c1e..5431af29240 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -113,7 +113,6 @@ extern int radeon_lockup_timeout;
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
-#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
/* internal ring indices */
/* r1xx+ has gfx CP ring */
@@ -160,48 +159,6 @@ static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
#endif
bool radeon_get_bios(struct radeon_device *rdev);
-
-/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
- struct mutex mutex;
- struct task_struct *owner;
- int level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
- mutex_init(&mutex->mutex);
- mutex->owner = NULL;
- mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
- if (mutex_trylock(&mutex->mutex)) {
- /* The mutex was unlocked before, so it's ours now */
- mutex->owner = current;
- } else if (mutex->owner != current) {
- /* Another process locked the mutex, take it */
- mutex_lock(&mutex->mutex);
- mutex->owner = current;
- }
- /* Otherwise the mutex was already locked by this process */
-
- mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
- if (--mutex->level > 0)
- return;
-
- mutex->owner = NULL;
- mutex_unlock(&mutex->mutex);
-}
-
-
/*
* Dummy page
*/
@@ -258,8 +215,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- /* seq is protected by ring emission lock */
- uint64_t seq;
+ /* sync_seq is protected by ring emission lock */
+ uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
unsigned long last_activity;
bool initialized;
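The single per-driver seq becomes a sync_seq[] array: entry i records the newest fence from this ring that ring i has already waited on, which is what lets radeon_fence_need_sync()/radeon_fence_note_sync() skip redundant semaphores. A plausible sketch of the check (field usage guessed from the declarations here, not the verbatim implementation):

    static bool need_sync(struct radeon_fence *fence, int dst_ring)
    {
            struct radeon_fence_driver *fdrv =
                    &fence->rdev->fence_drv[fence->ring];
            /* dst_ring already waited on a later fence from this ring */
            return fence->seq > fdrv->sync_seq[dst_ring];
    }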
@@ -277,19 +234,39 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
+static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+ struct radeon_fence *b)
+{
+ if (!a) {
+ return b;
+ }
+
+ if (!b) {
+ return a;
+ }
+
+ BUG_ON(a->ring != b->ring);
+
+ if (a->seq > b->seq) {
+ return a;
+ } else {
+ return b;
+ }
+}
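radeon_fence_later() only orders fences from the same ring (the BUG_ON enforces this), since sequence numbers are not comparable across rings. A hypothetical caller that tracks the most recent fence it has seen, reference counting elided:

    /* hypothetical bookkeeping: keep whichever same-ring fence signals last */
    last = radeon_fence_later(last, new_fence);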
/*
* Tiling registers
@@ -385,7 +362,7 @@ struct radeon_bo_list {
* alignment).
*/
struct radeon_sa_manager {
- spinlock_t lock;
+ wait_queue_head_t wq;
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
@@ -451,10 +428,9 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- bool sync_to[RADEON_NUM_RINGS],
- int dst_ring);
+ int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
+ struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
/*
@@ -597,21 +573,18 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_AFMT_BLOCKS 6
struct radeon_irq {
- bool installed;
- bool sw_int[RADEON_NUM_RINGS];
- bool crtc_vblank_int[RADEON_MAX_CRTCS];
- bool pflip[RADEON_MAX_CRTCS];
- wait_queue_head_t vblank_queue;
- bool hpd[RADEON_MAX_HPD_PINS];
- bool gui_idle;
- bool gui_idle_acked;
- wait_queue_head_t idle_queue;
- bool afmt[RADEON_MAX_AFMT_BLOCKS];
- spinlock_t sw_lock;
- int sw_refcount[RADEON_NUM_RINGS];
- union radeon_irq_stat_regs stat_regs;
- spinlock_t pflip_lock[RADEON_MAX_CRTCS];
- int pflip_refcount[RADEON_MAX_CRTCS];
+ bool installed;
+ spinlock_t lock;
+ atomic_t ring_int[RADEON_NUM_RINGS];
+ bool crtc_vblank_int[RADEON_MAX_CRTCS];
+ atomic_t pflip[RADEON_MAX_CRTCS];
+ wait_queue_head_t vblank_queue;
+ bool hpd[RADEON_MAX_HPD_PINS];
+ bool gui_idle;
+ bool gui_idle_acked;
+ wait_queue_head_t idle_queue;
+ bool afmt[RADEON_MAX_AFMT_BLOCKS];
+ union radeon_irq_stat_regs stat_regs;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -620,6 +593,11 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
/*
* CP & rings.
@@ -630,9 +608,11 @@ struct radeon_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
+ int ring;
struct radeon_fence *fence;
unsigned vm_id;
bool is_const_ib;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
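The new sync_to[] array gives each IB one fence slot per ring that must signal before the IB may run, presumably so the scheduler can emit the required semaphore waits itself. A hedged sketch of how a producer might fill it, reusing radeon_fence_later() from above ('dep' is a hypothetical dependency fence):

    ib.sync_to[dep->ring] = radeon_fence_later(ib.sync_to[dep->ring], dep);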
@@ -642,6 +622,9 @@ struct radeon_ring {
unsigned rptr;
unsigned rptr_offs;
unsigned rptr_reg;
+ unsigned rptr_save_reg;
+ u64 next_rptr_gpu_addr;
+ volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
@@ -657,6 +640,7 @@ struct radeon_ring {
u32 ptr_reg_shift;
u32 ptr_reg_mask;
u32 nop;
+ u32 idx;
};
/*
@@ -690,6 +674,7 @@ struct radeon_vm_funcs {
};
struct radeon_vm_manager {
+ struct mutex lock;
struct list_head lru_vm;
uint32_t use_bitmap;
struct radeon_sa_manager sa_manager;
@@ -718,13 +703,10 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
uint32_t ptr_mask;
- spinlock_t lock;
+ atomic_t lock;
bool enabled;
};
@@ -757,8 +739,6 @@ struct r600_blit {
u32 state_len;
};
-void r600_blit_suspend(struct radeon_device *rdev);
-
/*
* SI RLC stuff
*/
@@ -774,14 +754,14 @@ struct si_rlc {
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
-int radeon_ib_pool_start(struct radeon_device *rdev);
-int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
@@ -793,6 +773,10 @@ int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+ uint32_t **data);
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -891,6 +875,7 @@ struct radeon_wb {
};
#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_RING0_NEXT_RPTR 256
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
@@ -1039,11 +1024,12 @@ struct radeon_power_state {
struct radeon_pm {
struct mutex mutex;
+ /* write locked while reprogramming mclk */
+ struct rw_semaphore mclk_lock;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
bool vblank_sync;
- bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -1192,20 +1178,20 @@ struct radeon_asic {
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
u32 blit_ring_index;
int (*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
@@ -1467,6 +1453,7 @@ struct radeon_device {
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
+ struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
enum radeon_family family;
@@ -1512,7 +1499,6 @@ struct radeon_device {
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
- struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool shutdown;
@@ -1534,7 +1520,6 @@ struct radeon_device {
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- struct mutex vram_mutex;
bool audio_enabled;
struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
@@ -1785,8 +1770,6 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_manager_start(struct radeon_device *rdev);
-int radeon_vm_manager_suspend(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f533df5f7d5..973417c4b01 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -40,6 +40,16 @@
/*
* Registers accessors functions.
*/
+/**
+ * radeon_invalid_rreg - dummy reg read function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function. Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
@@ -47,6 +57,16 @@ static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
return 0;
}
+/**
+ * radeon_invalid_wreg - dummy reg write function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function. Used for register blocks
+ * that certain asics don't have (all asics).
+ */
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
@@ -54,6 +74,14 @@ static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32
BUG_ON(1);
}
+/**
+ * radeon_register_accessor_init - sets up the register accessor callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the register accessor callbacks for various register
+ * apertures. Not all asics have all apertures (all asics).
+ */
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
@@ -102,6 +130,14 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
/* helper to disable agp */
+/**
+ * radeon_agp_disable - AGP disable helper function
+ *
+ * @rdev: radeon device pointer
+ *
+ * Removes AGP flags and changes the gart callbacks on AGP
+ * cards when using the internal gart rather than AGP (all asics).
+ */
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
@@ -1608,6 +1644,16 @@ static struct radeon_asic si_asic = {
},
};
+/**
+ * radeon_asic_init - register asic specific callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Registers the appropriate asic specific callbacks for each
+ * chip family. Also sets other asic specific info like the number
+ * of crtcs and the register aperture accessors (all asics).
+ * Returns 0 for success.
+ */
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e76a941ef14..f4af2431043 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -79,7 +79,7 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -103,7 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -144,7 +143,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -318,7 +317,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -363,9 +362,10 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_sa_bo **vb);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
- struct radeon_sa_bo *vb);
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages,
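A hedged sketch of how these reworked blit entry points plausibly chain together inside a copy implementation; the exact sequencing is an assumption drawn from the signatures above, with error handling elided:

/* prepare reserves ring space and syncs other rings via the semaphore,
 * the blit packets are emitted, then done_copy emits the fence and
 * releases the ring. fence is the struct radeon_fence ** from the caller.
 */
struct radeon_sa_bo *vb;
struct radeon_semaphore *sem;
int r;

r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
if (r)
	return r;
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
r600_blit_done_copy(rdev, fence, vb, sem);
return 0;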
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 364f5b1a04b..bedda9caadd 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -45,20 +45,14 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
- r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
- if (r)
- return r;
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- fence);
+ &fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
- r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
- if (r)
- return r;
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- fence);
+ &fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2914c5761cf..895e628b60f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* just deal with DP (not eDP) here. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- int saved_dpms = connector->dpms;
-
- /* Only turn off the display it it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- else if (radeon_dp_needs_link_train(radeon_connector))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- connector->dpms = saved_dpms;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ /* if existing sink type was not DP no need to retrain */
+ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return;
+
+ /* first get sink type as it may be reset after (un)plug */
+ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ /* don't do anything if sink is not display port, i.e.,
+ * passive dp->(dvi|hdmi) adaptor
+ */
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ int saved_dpms = connector->dpms;
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ } else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* set it to OFF so that drm_helper_connector_dpms()
+ * won't return immediately since the current state
+ * is ON at this point.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ connector->dpms = saved_dpms;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 142f89462aa..8a4c49ef0cc 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -115,36 +115,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- bool need_sync = false;
- int i, r;
+ int i;
for (i = 0; i < p->nrelocs; i++) {
- struct radeon_fence *fence;
+ struct radeon_fence *a, *b;
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- fence = p->relocs[i].robj->tbo.sync_obj;
- if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- need_sync = true;
- }
- }
-
- if (!need_sync) {
- return 0;
- }
-
- r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
- if (r) {
- return r;
+ a = p->relocs[i].robj->tbo.sync_obj;
+ b = p->ib.sync_to[a->ring];
+ p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
}
-
- return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
- sync_to_ring, p->ring);
}
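radeon_fence_later() itself is outside this hunk; a plausible reading, given the per-ring bookkeeping above, is that it keeps whichever same-ring fence carries the higher sequence number. Sketched here purely as an assumption:

/* Assumed semantics: both fences are on the same ring (or NULL);
 * the one emitted later, i.e. with the larger seq, wins.
 */
static struct radeon_fence *fence_later_sketch(struct radeon_fence *a,
					       struct radeon_fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return (a->seq > b->seq) ? a : b;
}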
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -368,16 +352,13 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n");
return r;
}
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
+ radeon_cs_sync_rings(parser);
parser->ib.vm_id = 0;
- r = radeon_ib_schedule(rdev, &parser->ib);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
- return 0;
+ return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
@@ -459,6 +440,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_bind(rdev, vm);
if (r) {
@@ -468,30 +450,26 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (r) {
goto out;
}
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
+ radeon_cs_sync_rings(parser);
+
+ parser->ib.vm_id = vm->id;
+ /* ib pool is bound at 0 in virtual address space,
+ * so gpu_addr is the offset inside the pool bo
+ */
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
parser->const_ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space to gpu_addr is the
- * offset inside the pool bo
+ /* ib pool is bound at 0 in virtual address space,
+ * so gpu_addr is the offset inside the pool bo
*/
parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
- r = radeon_ib_schedule(rdev, &parser->const_ib);
- if (r)
- goto out;
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ } else {
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
}
- parser->ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space to gpu_addr is the
- * offset inside the pool bo
- */
- parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
- parser->ib.is_const_ib = false;
- r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
@@ -499,7 +477,8 @@ out:
}
vm->fence = radeon_fence_ref(parser->ib.fence);
}
- mutex_unlock(&fpriv->vm.mutex);
+ mutex_unlock(&vm->mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
return r;
}
@@ -519,9 +498,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_parser parser;
int r;
- radeon_mutex_lock(&rdev->cs_mutex);
+ down_read(&rdev->exclusive_lock);
if (!rdev->accel_working) {
- radeon_mutex_unlock(&rdev->cs_mutex);
+ up_read(&rdev->exclusive_lock);
return -EBUSY;
}
/* initialize parser */
@@ -534,8 +513,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_parser_relocs(&parser);
@@ -543,8 +522,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_ib_chunk(rdev, &parser);
@@ -557,8 +536,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
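The cs_mutex to exclusive_lock switch follows the classic rwsem pattern, sketched below: submission paths are concurrent readers, while the GPU reset path (see radeon_gpu_reset() later in this diff) is the sole writer that drains them first.

down_read(&rdev->exclusive_lock);    /* CS ioctl: many may run at once */
/* ... parse relocations, validate, schedule the IB ... */
up_read(&rdev->exclusive_lock);

down_write(&rdev->exclusive_lock);   /* GPU reset: exclusive access */
/* ... save rings, reset the asic, restore rings ... */
up_write(&rdev->exclusive_lock);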
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 42acc6449dd..711e95ad39b 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -262,8 +262,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
if (!(cursor_end & 0x7f))
w--;
}
- if (w <= 0)
+ if (w <= 0) {
w = 1;
+ cursor_end = x - xorigin + w;
+ if (!(cursor_end & 0x7f)) {
+ x--;
+ WARN_ON_ONCE(x < 0);
+ }
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 066c98b888a..742af8244e8 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -96,8 +96,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
-/*
- * Clear GPU surface registers.
+/**
+ * radeon_surface_init - Clear GPU surface registers.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear GPU surface registers (r1xx-r5xx).
*/
void radeon_surface_init(struct radeon_device *rdev)
{
@@ -119,6 +123,13 @@ void radeon_surface_init(struct radeon_device *rdev)
/*
* GPU scratch registers helpers function.
*/
+/**
+ * radeon_scratch_init - Init scratch register driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init CP scratch register driver information (r1xx-r5xx)
+ */
void radeon_scratch_init(struct radeon_device *rdev)
{
int i;
@@ -136,6 +147,15 @@ void radeon_scratch_init(struct radeon_device *rdev)
}
}
+/**
+ * radeon_scratch_get - Allocate a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
int i;
@@ -150,6 +170,14 @@ int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
return -EINVAL;
}
+/**
+ * radeon_scratch_free - Free a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
int i;
@@ -162,6 +190,20 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
+/*
+ * radeon_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
+
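An illustrative consumer of a writeback slot (an assumption, mirroring how the ring code reads its read pointer when writeback is enabled):

u32 rptr;

if (rdev->wb.enabled)
	rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
else
	rptr = RREG32(ring->rptr_reg);  /* fall back to the MMIO register */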
+/**
+ * radeon_wb_disable - Disable Writeback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback (all asics). Used for suspend.
+ */
void radeon_wb_disable(struct radeon_device *rdev)
{
int r;
@@ -177,6 +219,14 @@ void radeon_wb_disable(struct radeon_device *rdev)
rdev->wb.enabled = false;
}
+/**
+ * radeon_wb_fini - Disable Writeback and free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
@@ -187,6 +237,15 @@ void radeon_wb_fini(struct radeon_device *rdev)
}
}
+/**
+ * radeon_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes writeback and allocates the writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or a negative error code on failure.
+ */
int radeon_wb_init(struct radeon_device *rdev)
{
int r;
@@ -355,6 +414,15 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/
+/**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
@@ -404,6 +472,14 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+/**
+ * radeon_update_bandwidth_info - update display bandwidth params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Used when sclk/mclk are switched or display modes are set.
+ * The params are used to calculate display watermarks (all asics).
+ */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
@@ -424,6 +500,15 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
}
}
+/**
+ * radeon_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
@@ -442,6 +527,16 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
}
}
+/**
+ * radeon_dummy_page_init - init dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
if (rdev->dummy_page.page)
@@ -460,6 +555,13 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_dummy_page_fini - free dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
@@ -472,6 +574,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes then to program specific
+ * actions (set display modes, asic init, etc.). See radeon_atombios.c,
+ * atombios.h, and atom.c
+ */
+
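The cail_* accessors documented below are handed to the interpreter through the card_info structure; a condensed sketch of the wiring done in radeon_atombios_init() (field names as used by the functions that follow):

atom_card_info->reg_read    = cail_reg_read;    /* MMIO */
atom_card_info->reg_write   = cail_reg_write;
atom_card_info->ioreg_read  = cail_ioreg_read;  /* IO ports */
atom_card_info->ioreg_write = cail_ioreg_write;
atom_card_info->mc_read     = cail_mc_read;     /* memory controller */
atom_card_info->mc_write    = cail_mc_write;
atom_card_info->pll_read    = cail_pll_read;    /* PLLs */
atom_card_info->pll_write   = cail_pll_write;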
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -481,6 +600,15 @@ static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -488,6 +616,15 @@ static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
rdev->pll_wreg(rdev, reg, val);
}
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -497,6 +634,15 @@ static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -504,6 +650,15 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
rdev->mc_wreg(rdev, reg, val);
}
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -511,6 +666,15 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
WREG32(reg*4, val);
}
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -520,6 +684,15 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -527,6 +700,15 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
WREG32_IO(reg*4, val);
}
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -536,6 +718,16 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * radeon_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
@@ -569,6 +761,15 @@ int radeon_atombios_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
@@ -578,17 +779,50 @@ void radeon_atombios_fini(struct radeon_device *rdev)
kfree(rdev->mode_info.atom_card_info);
}
+/* COMBIOS */
+/*
+ * COMBIOS is the bios format prior to ATOM. It provides
+ * command tables similar to ATOM, but doesn't have a unified
+ * parser. See radeon_combios.c
+ */
+
+/**
+ * radeon_combios_init - init the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info for combios (r1xx-r3xx).
+ * Returns 0 on success.
+ * Called at driver startup.
+ */
int radeon_combios_init(struct radeon_device *rdev)
{
radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
return 0;
}
+/**
+ * radeon_combios_fini - free the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info for combios (r1xx-r3xx).
+ * Called at driver shutdown.
+ */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
-/* if we get transitioned to only one device, tak VGA back */
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * radeon_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: radeon_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
struct radeon_device *rdev = cookie;
@@ -600,6 +834,14 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+/**
+ * radeon_check_arguments - validate module params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
@@ -666,6 +908,15 @@ void radeon_check_arguments(struct radeon_device *rdev)
}
}
+/**
+ * radeon_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga switcheroo state
+ *
+ * Callback for the switcheroo driver. Suspends or resumes the
+ * asic before or after it is powered up using ACPI methods.
+ */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -686,6 +937,15 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
}
}
+/**
+ * radeon_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver. Check if the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -703,6 +963,18 @@ static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
.can_switch = radeon_switcheroo_can_switch,
};
+/**
+ * radeon_device_init - initialize the driver
+ *
+ * @rdev: radeon_device pointer
+ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -721,6 +993,10 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->accel_working = false;
+ /* set up ring ids */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ rdev->ring[i].idx = i;
+ }
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -728,20 +1004,20 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
- if (rdev->family >= CHIP_R600)
- spin_lock_init(&rdev->ih.lock);
+ atomic_set(&rdev->ih.lock, 0);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
- mutex_init(&rdev->vram_mutex);
+ init_rwsem(&rdev->pm.mclk_lock);
+ init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
r = radeon_gem_init(rdev);
if (r)
return r;
/* initialize vm here */
+ mutex_init(&rdev->vm_manager.lock);
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
@@ -822,6 +1098,10 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
@@ -847,6 +1127,14 @@ int radeon_device_init(struct radeon_device *rdev,
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+/**
+ * radeon_device_fini - tear down the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
@@ -868,6 +1156,16 @@ void radeon_device_fini(struct radeon_device *rdev)
/*
* Suspend & resume.
*/
+/**
+ * radeon_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @state: suspend state
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev;
@@ -942,10 +1240,20 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
return 0;
}
+/**
+ * radeon_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
int radeon_resume_kms(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -960,6 +1268,11 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
@@ -984,30 +1297,77 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+/**
+ * radeon_gpu_reset - reset the asic
+ *
+ * @rdev: radeon device pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
int radeon_gpu_reset(struct radeon_device *rdev)
{
- int r;
+ unsigned ring_sizes[RADEON_NUM_RINGS];
+ uint32_t *ring_data[RADEON_NUM_RINGS];
+
+ bool saved = false;
+
+ int i, r;
int resched;
+ down_write(&rdev->exclusive_lock);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+ &ring_data[i]);
+ if (ring_sizes[i]) {
+ saved = true;
+ dev_info(rdev->dev, "Saved %d dwords of commands "
+ "on ring %d.\n", ring_sizes[i], i);
+ }
+ }
+
+retry:
r = radeon_asic_reset(rdev);
if (!r) {
- dev_info(rdev->dev, "GPU reset succeed\n");
+ dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
radeon_resume(rdev);
- radeon_restore_bios_scratch_regs(rdev);
- drm_helper_resume_force_mode(rdev->ddev);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+
+ if (!r) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ radeon_ring_restore(rdev, &rdev->ring[i],
+ ring_sizes[i], ring_data[i]);
+ }
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r) {
+ dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
+ if (saved) {
+ radeon_suspend(rdev);
+ goto retry;
+ }
+ }
+ } else {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ kfree(ring_data[i]);
+ }
+ }
+
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}
+ up_write(&rdev->exclusive_lock);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 64a008d1449..7ddef8f30d0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1401,7 +1401,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_i2c_fini(rdev);
}
-static bool is_hdtv_mode(struct drm_display_mode *mode)
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -1414,7 +1414,7 @@ static bool is_hdtv_mode(struct drm_display_mode *mode)
}
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 2c4d53fd20c..dcea6f01ae4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -133,7 +133,7 @@ int radeon_tv = 1;
int radeon_audio = 0;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
-int radeon_pcie_gen2 = 0;
+int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
@@ -179,7 +179,7 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
-MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
@@ -262,7 +262,6 @@ static struct drm_driver driver_old = {
.irq_postinstall = radeon_driver_irq_postinstall,
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = &radeon_driver_old_fops,
@@ -365,7 +364,6 @@ static struct drm_driver kms_driver = {
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 11f5f402d22..7b737b9339a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,39 +40,95 @@
#include "radeon.h"
#include "radeon_trace.h"
+/*
+ * Fences
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed. Whether
+ * we use a scratch register or memory location depends on the asic
+ * and whether writeback is enabled.
+ */
+
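In miniature, the signaled test these monotonically increasing sequence numbers enable (a sketch of the logic in radeon_fence_seq_signaled() below, not a verbatim copy):

if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
	return true;                      /* already retired */
radeon_fence_process(rdev, ring);         /* poll the fence value once */
return atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq;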
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
- if (rdev->wb.enabled) {
- *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+ struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+ if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+ *drv->cpu_addr = cpu_to_le32(seq);
} else {
- WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+ WREG32(drv->scratch_reg, seq);
}
}
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
+ struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
u32 seq = 0;
- if (rdev->wb.enabled) {
- seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+ if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+ seq = le32_to_cpu(*drv->cpu_addr);
} else {
- seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+ seq = RREG32(drv->scratch_reg);
}
return seq;
}
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence **fence,
+ int ring)
{
/* we are protected by the ring emission mutex */
- if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
- return 0;
+ *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+ if ((*fence) == NULL) {
+ return -ENOMEM;
}
- fence->seq = ++rdev->fence_drv[fence->ring].seq;
- radeon_fence_ring_emit(rdev, fence->ring, fence);
- trace_radeon_fence_emit(rdev->ddev, fence->seq);
+ kref_init(&((*fence)->kref));
+ (*fence)->rdev = rdev;
+ (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+ (*fence)->ring = ring;
+ radeon_fence_ring_emit(rdev, ring, *fence);
+ trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
return 0;
}
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq;
@@ -133,30 +189,35 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
}
}
+/**
+ * radeon_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object (all asics).
+ */
static void radeon_fence_destroy(struct kref *kref)
{
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence);
}
-int radeon_fence_create(struct radeon_device *rdev,
- struct radeon_fence **fence,
- int ring)
-{
- *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
- if ((*fence) == NULL) {
- return -ENOMEM;
- }
- kref_init(&((*fence)->kref));
- (*fence)->rdev = rdev;
- (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
- (*fence)->ring = ring;
- return 0;
-}
-
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value). Helper function for
+ * radeon_fence_signaled().
+ */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
u64 seq, unsigned ring)
{
@@ -171,15 +232,19 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
if (!fence) {
return true;
}
- if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
- WARN(1, "Querying an unemitted fence : %p !\n", fence);
- return true;
- }
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
return true;
}
@@ -190,6 +255,24 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return false;
}
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
+ *
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
+ */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
unsigned ring, bool intr, bool lock_ring)
{
@@ -285,6 +368,17 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
return 0;
}
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
int r;
@@ -315,6 +409,20 @@ bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
return false;
}
+/**
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics). Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
u64 *target_seq, bool intr)
{
@@ -343,7 +451,7 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
/* nothing to wait for ? */
if (ring == RADEON_NUM_RINGS) {
- return 0;
+ return -ENOENT;
}
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
@@ -424,6 +532,19 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics). Fence
+ * array is indexed by ring id. @intr selects whether to use
+ * interruptable (true) or non-interruptable (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr)
@@ -444,9 +565,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0;
}
- if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
- seq[i] = fences[i]->seq;
- }
+ seq[i] = fences[i]->seq;
}
r = radeon_fence_wait_any_seq(rdev, seq, intr);
@@ -456,16 +575,22 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq;
- /* We are not protected by ring lock when reading current seq but
- * it's ok as worst case is we return to early while we could have
- * wait.
- */
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
- if (seq >= rdev->fence_drv[ring].seq) {
+ if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emitted fence */
return -ENOENT;
@@ -473,23 +598,59 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/**
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Caller must hold ring lock.
+ */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
- /* We are not protected by ring lock when reading current seq
- * but it's ok as wait empty is call from place where no more
- * activity can be scheduled so there won't be concurrent access
- * to seq value.
- */
- return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
- ring, false, false);
+ uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+ while (1) {
+ int r;
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r == -EDEADLK) {
+ mutex_unlock(&rdev->ring_lock);
+ r = radeon_gpu_reset(rdev);
+ mutex_lock(&rdev->ring_lock);
+ if (!r)
+ continue;
+ }
+ if (r) {
+ dev_err(rdev->dev, "error waiting for ring to become"
+ " idle (%d)\n", r);
+ }
+ return;
+ }
}
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
kref_get(&fence->kref);
return fence;
}
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
void radeon_fence_unref(struct radeon_fence **fence)
{
struct radeon_fence *tmp = *fence;
@@ -500,6 +661,16 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring. Used by the
+ * dynpm code to track ring activity.
+ */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
uint64_t emitted;
@@ -508,7 +679,8 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
* but it's ok to report slightly wrong fence count here.
*/
radeon_fence_process(rdev, ring);
- emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ emitted = rdev->fence_drv[ring].sync_seq[ring]
+ - atomic64_read(&rdev->fence_drv[ring].last_seq);
/* to avoid 32 bit wrap around */
if (emitted > 0x10000000) {
emitted = 0x10000000;
@@ -516,6 +688,83 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
return (unsigned)emitted;
}
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics). If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+ struct radeon_fence_driver *fdrv;
+
+ if (!fence) {
+ return false;
+ }
+
+ if (fence->ring == dst_ring) {
+ return false;
+ }
+
+ /* we are protected by the ring mutex */
+ fdrv = &fence->rdev->fence_drv[dst_ring];
+ if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+ struct radeon_fence_driver *dst, *src;
+ unsigned i;
+
+ if (!fence) {
+ return;
+ }
+
+ if (fence->ring == dst_ring) {
+ return;
+ }
+
+ /* we are protected by the ring mutex */
+ src = &fence->rdev->fence_drv[fence->ring];
+ dst = &fence->rdev->fence_drv[dst_ring];
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (i == dst_ring) {
+ continue;
+ }
+ dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+ }
+}
+
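How the two helpers above pair up in a ring-sync path, sketched as an assumption (the semaphore emission itself lives in radeon_semaphore.c):

if (radeon_fence_need_sync(fence, dst_ring)) {
	/* emit a semaphore: signal on fence->ring, wait on dst_ring */
	radeon_fence_note_sync(fence, dst_ring);   /* never sync twice */
}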
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
uint64_t index;
@@ -537,24 +786,49 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
+ radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
return 0;
}
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
+ int i;
+
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- rdev->fence_drv[ring].seq = 0;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
int ring;
@@ -569,6 +843,14 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
int ring;
@@ -595,7 +877,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- int i;
+ int i, j;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!rdev->fence_drv[i].initialized)
@@ -604,8 +886,14 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
- seq_printf(m, "Last emitted 0x%016llx\n",
- rdev->fence_drv[i].seq);
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].sync_seq[i]);
+
+ for (j = 0; j < RADEON_NUM_RINGS; ++j) {
+ if (i != j && rdev->fence_drv[j].initialized)
+ seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+ j, rdev->fence_drv[i].sync_seq[j]);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84b648a7ddd..b3720054614 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -31,8 +31,38 @@
#include "radeon_reg.h"
/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space. System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective. A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP. AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
+
+/*
* Common GART table functions.
*/
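The address arithmetic shared by the bind/unbind helpers below, shown as an illustration (one system page can back several GPU pages when PAGE_SIZE exceeds RADEON_GPU_PAGE_SIZE):

unsigned t = offset / RADEON_GPU_PAGE_SIZE;            /* first GART slot */
unsigned p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);   /* first CPU page  */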
+/**
+ * radeon_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
void *ptr;
@@ -54,6 +84,15 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_gart_table_ram_free - free system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
+ * gart table to be in system memory.
+ */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
if (rdev->gart.ptr == NULL) {
@@ -73,6 +112,16 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev)
rdev->gart.table_addr = 0;
}
+/**
+ * radeon_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
int r;
@@ -88,6 +137,16 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
uint64_t gpu_addr;
@@ -110,6 +169,14 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
return r;
}
+/**
+ * radeon_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
int r;
@@ -126,6 +193,15 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
}
}
+/**
+ * radeon_gart_table_vram_free - free gart page table vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+). These asics require the gart table to
+ * be in video memory.
+ */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
if (rdev->gart.robj == NULL) {
@@ -135,12 +211,19 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
radeon_bo_unref(&rdev->gart.robj);
}
-
-
-
/*
* Common gart functions.
*/
+/**
+ * radeon_gart_unbind - unbind pages from the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages)
{
@@ -172,6 +255,19 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
radeon_gart_tlb_flush(rdev);
}
+/**
+ * radeon_gart_bind - bind pages into the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
@@ -203,6 +299,14 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
return 0;
}
+/**
+ * radeon_gart_restore - bind all pages in the gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Binds all pages in the gart page table (all asics).
+ * Used to rebuild the gart table on device startup or resume.
+ */
void radeon_gart_restore(struct radeon_device *rdev)
{
int i, j, t;
@@ -222,6 +326,14 @@ void radeon_gart_restore(struct radeon_device *rdev)
radeon_gart_tlb_flush(rdev);
}
+/**
+ * radeon_gart_init - init the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
int radeon_gart_init(struct radeon_device *rdev)
{
int r, i;
@@ -262,6 +374,13 @@ int radeon_gart_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_gart_fini - tear down the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
void radeon_gart_fini(struct radeon_device *rdev)
{
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
@@ -278,35 +397,104 @@ void radeon_gart_fini(struct radeon_device *rdev)
}
/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time. The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID. When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer. VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
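As a rough data-model sketch of the contrast drawn above (one global table for the legacy GART versus one page table per VMID), with invented names and no claim to match the driver's actual structures:

#include <stdint.h>

#define MAX_VMS 8	/* cayman/trinity; SI raises this to 16 */

/* legacy gart: a single table for the whole GPU */
struct legacy_gart {
	uint64_t *table;		/* one entry per GPU page */
};

/* GPUVM: a table per VMID, selected per command buffer */
struct gpuvm_manager {
	struct {
		int id;			/* VMID, -1 while unbound */
		uint64_t *page_table;	/* may mix vram and system pages */
	} vms[MAX_VMS];
};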
+/*
* vm helpers
*
* TODO bind a default page at vm initialization for default address
*/
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
+ struct radeon_vm *vm;
+ struct radeon_bo_va *bo_va;
int r;
- rdev->vm_manager.enabled = false;
+ if (!rdev->vm_manager.enabled) {
+ /* mark first vm as always in use, it's the system one */
+ /* allocate enough for 2 full VM pts */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ rdev->vm_manager.max_pfn * 8 * 2,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
- /* mark first vm as always in use, it's the system one */
- /* allocate enough for 2 full VM pts */
- r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8 * 2,
- RADEON_GEM_DOMAIN_VRAM);
- if (r) {
- dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
- (rdev->vm_manager.max_pfn * 8) >> 10);
- return r;
+ r = rdev->vm_manager.funcs->init(rdev);
+ if (r)
+ return r;
+
+ rdev->vm_manager.enabled = true;
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+ if (r)
+ return r;
}
- r = rdev->vm_manager.funcs->init(rdev);
- if (r == 0)
- rdev->vm_manager.enabled = true;
+ /* restore page table */
+ list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
+ if (vm->id == -1)
+ continue;
- return r;
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ struct ttm_mem_reg *mem = NULL;
+ if (bo_va->valid)
+ mem = &bo_va->bo->tbo.mem;
+
+ bo_va->valid = false;
+ r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
+ if (r) {
+ DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
+ }
+ }
+
+ r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
+ if (r) {
+ DRM_ERROR("Failed to bind vm %d!\n", vm->id);
+ }
+ }
+ return 0;
}
-/* cs mutex must be lock */
+/* global mutex must be locked */
+/**
+ * radeon_vm_unbind_locked - unbind a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Unbind the requested vm (cayman+).
+ * Wait for use of the VM to finish, then unbind the page table,
+ * and free the page table memory.
+ */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
struct radeon_vm *vm)
{
@@ -317,10 +505,21 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
}
/* wait for vm use to end */
- if (vm->fence) {
- radeon_fence_wait(vm->fence, false);
- radeon_fence_unref(&vm->fence);
+ while (vm->fence) {
+ int r;
+ r = radeon_fence_wait(vm->fence, false);
+ if (r)
+ DRM_ERROR("error while waiting for fence: %d\n", r);
+ if (r == -EDEADLK) {
+ mutex_unlock(&rdev->vm_manager.lock);
+ r = radeon_gpu_reset(rdev);
+ mutex_lock(&rdev->vm_manager.lock);
+ if (!r)
+ continue;
+ }
+ break;
}
+ radeon_fence_unref(&vm->fence);
/* hw unbind */
rdev->vm_manager.funcs->unbind(rdev, vm);
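The wait loop above encodes a retry-on-deadlock policy: if the fence wait returns -EDEADLK, the manager lock is dropped, a GPU reset is attempted, and the wait is retried. A stand-alone sketch of that control flow, with stub wait/reset functions and a retry cap added purely for illustration:

#include <errno.h>
#include <stdio.h>

static int fence_wait(void) { return -EDEADLK; }	/* pretend the GPU hung */
static int gpu_reset(void)  { return 0; }		/* pretend reset worked */

static void wait_for_fence(void)
{
	for (int tries = 0; tries < 3; tries++) {
		int r = fence_wait();
		if (!r)
			return;
		fprintf(stderr, "error while waiting for fence: %d\n", r);
		if (r == -EDEADLK && gpu_reset() == 0)
			continue;	/* reset succeeded, wait again */
		return;			/* any other error: give up */
	}
}

int main(void) { wait_for_fence(); return 0; }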
@@ -335,39 +534,42 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
}
}
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
- if (rdev->vm_manager.sa_manager.bo == NULL)
- return;
- radeon_vm_manager_suspend(rdev);
- rdev->vm_manager.funcs->fini(rdev);
- radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
- rdev->vm_manager.enabled = false;
-}
-
-int radeon_vm_manager_start(struct radeon_device *rdev)
-{
- if (rdev->vm_manager.sa_manager.bo == NULL) {
- return -EINVAL;
- }
- return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-}
-
-int radeon_vm_manager_suspend(struct radeon_device *rdev)
-{
struct radeon_vm *vm, *tmp;
- radeon_mutex_lock(&rdev->cs_mutex);
+ if (!rdev->vm_manager.enabled)
+ return;
+
+ mutex_lock(&rdev->vm_manager.lock);
/* unbind all active vm */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
radeon_vm_unbind_locked(rdev, vm);
}
rdev->vm_manager.funcs->fini(rdev);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+ mutex_unlock(&rdev->vm_manager.lock);
+
+ radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
}
-/* cs mutex must be lock */
+/* global mutex must be locked */
+/**
+ * radeon_vm_unbind - unbind a vm, taking its mutex
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Takes the vm mutex and calls radeon_vm_unbind_locked() (cayman+).
+ */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
mutex_lock(&vm->mutex);
@@ -375,7 +577,19 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_unlock(&vm->mutex);
}
-/* cs mutex must be lock & vm mutex must be lock */
+/* global and local mutex must be locked */
+/**
+ * radeon_vm_bind - bind a page table to a VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Bind the requested vm (cayman+).
+ * Suballocate memory for the page table, allocate a VMID
+ * and bind the page table to it, and finally start to populate
+ * the page table.
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
@@ -438,6 +652,20 @@ retry_id:
}
/* object have to be reserved */
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @offset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm and validate
+ * the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
@@ -479,7 +707,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
if (last_pfn > vm->last_pfn) {
/* release mutex and lock in right order */
mutex_unlock(&vm->mutex);
- radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
/* and check again */
if (last_pfn > vm->last_pfn) {
@@ -488,7 +716,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
radeon_vm_unbind_locked(rdev, vm);
vm->last_pfn = (last_pfn + align) & ~align;
}
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
}
head = &vm->va;
last_offset = 0;
@@ -515,6 +743,17 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_vm_get_addr - get the physical address of the page
+ *
+ * @rdev: radeon_device pointer
+ * @mem: ttm mem
+ * @pfn: pfn
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
struct ttm_mem_reg *mem,
unsigned pfn)
@@ -543,7 +782,18 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
return addr;
}
-/* object have to be reserved & cs mutex took & vm mutex took */
+/* object has to be reserved & global and local mutexes must be locked */
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
@@ -592,6 +842,18 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
}
/* object have to be reserved */
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Remove @bo from the requested vm (cayman+).
+ * Remove @bo from the list of bos associated with the vm and
+ * remove the ptes for @bo in the page table.
+ * Returns 0 for success.
+ */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo)
@@ -602,10 +864,10 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
if (bo_va == NULL)
return 0;
- radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
mutex_unlock(&vm->mutex);
list_del(&bo_va->bo_list);
@@ -614,6 +876,15 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo)
{
@@ -625,6 +896,17 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
}
}
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm (cayman+).
+ * Map the IB pool and any other shared objects into the VM
+ * by default, as they are used by all VMs.
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
int r;
@@ -651,15 +933,24 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
return r;
}
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list.
+ */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
- radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_unbind_locked(rdev, vm);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
/* remove all bo */
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 21ec9f5653c..84d04524573 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -159,11 +159,9 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
if (r == -EDEADLK) {
- radeon_mutex_lock(&rdev->cs_mutex);
r = radeon_gpu_reset(rdev);
if (!r)
r = -EAGAIN;
- radeon_mutex_unlock(&rdev->cs_mutex);
}
return r;
}
@@ -217,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, &gobj);
if (r) {
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -230,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
+ up_read(&rdev->exclusive_lock);
return 0;
}
@@ -242,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
/* transition the BO to a domain -
* just validate the BO into a certain domain */
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -249,10 +252,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
+ down_read(&rdev->exclusive_lock);
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
+ up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
@@ -260,6 +265,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_unreference_unlocked(gobj);
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5df58d1aba0..afaa1727abd 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,17 @@
#include "radeon.h"
#include "atom.h"
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+/**
+ * radeon_driver_irq_handler_kms - irq handler for KMS
+ *
+ * @DRM_IRQ_ARGS: args
+ *
+ * This is the irq handler for the radeon KMS driver (all asics).
+ * radeon_irq_process is a macro that points to the per-asic
+ * irq handler callback.
+ */
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -43,6 +54,17 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
/*
* Handle hotplug events outside the interrupt handler proper.
*/
+/**
+ * radeon_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt. It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
@@ -59,61 +81,94 @@ static void radeon_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
+/**
+ * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
unsigned i;
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = false;
+ atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
- rdev->irq.pflip[i] = false;
+ atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
+/**
+ * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles stuff to be done after enabling irqs (all asics).
+ * Returns 0 on success.
+ */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
- unsigned i;
-
dev->max_vblank_count = 0x001fffff;
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = true;
- radeon_irq_set(rdev);
return 0;
}
+/**
+ * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
unsigned i;
if (rdev == NULL) {
return;
}
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = false;
+ atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
- rdev->irq.pflip[i] = false;
+ atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
+/**
+ * radeon_msi_ok - asic specific msi checks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
static bool radeon_msi_ok(struct radeon_device *rdev)
{
/* RV370/RV380 was first asic with MSI support */
@@ -166,17 +221,22 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
return true;
}
+/**
+ * radeon_irq_kms_init - init driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
- int i;
int r = 0;
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- spin_lock_init(&rdev->irq.sw_lock);
- for (i = 0; i < rdev->num_crtc; i++)
- spin_lock_init(&rdev->irq.pflip_lock[i]);
+ spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
@@ -201,6 +261,13 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_irq_kms_fini - tear down driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
drm_vblank_cleanup(rdev->ddev);
@@ -213,31 +280,63 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
flush_work_sync(&rdev->hotplug_work);
}
+/**
+ * radeon_irq_kms_sw_irq_get - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Enables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
- spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
- rdev->irq.sw_int[ring] = true;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
+/**
+ * radeon_irq_kms_sw_irq_put - disable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to disable
+ *
+ * Disables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
- spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
- rdev->irq.sw_int[ring] = false;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
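Both helpers above replace the old lock-plus-refcount scheme with an atomic counter: the hardware registers are only reprogrammed on the 0-to-1 and 1-to-0 transitions. The same pattern in stand-alone C11, where hw_update() stands in for radeon_irq_set():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ring_int;

static void hw_update(const char *why) { printf("reprogram irq regs: %s\n", why); }

static void sw_irq_get(void)
{
	/* atomic_fetch_add returns the old value; +1 mirrors atomic_inc_return */
	if (atomic_fetch_add(&ring_int, 1) + 1 == 1)
		hw_update("first user, enable");
}

static void sw_irq_put(void)
{
	if (atomic_fetch_sub(&ring_int, 1) - 1 == 0)
		hw_update("last user, disable");
}

int main(void)
{
	sw_irq_get();	/* enables */
	sw_irq_get();	/* refcount only */
	sw_irq_put();	/* refcount only */
	sw_irq_put();	/* disables */
	return 0;
}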
+/**
+ * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to enable
+ *
+ * Enables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
@@ -245,14 +344,25 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
- spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
- rdev->irq.pflip[crtc] = true;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}
+/**
+ * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to disable
+ *
+ * Disables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
@@ -260,12 +370,121 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
- spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
- rdev->irq.pflip[crtc] = false;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}
+/**
+ * radeon_irq_kms_enable_afmt - enable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to enable
+ *
+ * Enables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.afmt[block] = true;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_afmt - disable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to disable
+ *
+ * Disables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.afmt[block] = false;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to enable.
+ *
+ * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+ rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to disable.
+ *
+ * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+ rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
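The hpd pair above expands a pin bitmask into the per-pin bool array. The disable path can use logical `!` instead of bitwise `~` only because each hpd[i] holds 0 or 1; a quick stand-alone check of both directions:

#include <stdbool.h>
#include <stdio.h>

#define MAX_HPD_PINS 6

static bool hpd[MAX_HPD_PINS];

static void enable_hpd(unsigned mask)
{
	for (int i = 0; i < MAX_HPD_PINS; ++i)
		hpd[i] |= !!(mask & (1 << i));	/* set requested pins */
}

static void disable_hpd(unsigned mask)
{
	for (int i = 0; i < MAX_HPD_PINS; ++i)
		hpd[i] &= !(mask & (1 << i));	/* clear requested pins only */
}

int main(void)
{
	enable_hpd(0x3);	/* pins 0 and 1 on */
	disable_hpd(0x1);	/* pin 0 off, pin 1 untouched */
	printf("hpd[0]=%d hpd[1]=%d\n", hpd[0], hpd[1]);	/* 0 1 */
	return 0;
}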
+
+/**
+ * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle
+ *
+ * @rdev: radeon device pointer
+ *
+ * Enables the GUI idle interrupt and waits for it to fire (r6xx+).
+ * This is currently used to make sure the 3D engine is idle for power
+ * management, but should be replaced with proper fence waits.
+ * GUI idle interrupts don't work very well on pre-r6xx hw and they also
+ * do not take into account other aspects of the chip that may be busy.
+ * DO NOT USE GOING FORWARD.
+ */
+int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+ int r;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.gui_idle = true;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+
+ r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.gui_idle = false;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5c58d7d90cb..1d73f16b5d9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,17 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+/**
+ * radeon_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * It calls radeon_modeset_fini() to tear down the
+ * displays, and radeon_device_fini() to tear down
+ * the rest of the device (CP, writeback, etc.).
+ * Returns 0 on success.
+ */
int radeon_driver_unload_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -46,6 +57,19 @@ int radeon_driver_unload_kms(struct drm_device *dev)
return 0;
}
+/**
+ * radeon_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * It calls radeon_device_init() to set up the non-display
+ * parts of the chip (asic init, CP, writeback, etc.), and
+ * radeon_modeset_init() to set up the display parts
+ * (crtcs, encoders, hotplug detect, etc.).
+ * Returns 0 on success, error on failure.
+ */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
@@ -96,6 +120,16 @@ out:
return r;
}
+/**
+ * radeon_set_filp_rights - Set filp right.
+ *
+ * @dev: drm dev pointer
+ * @owner: drm file
+ * @applier: drm file
+ * @value: value
+ *
+ * Sets the filp rights for the device (all asics).
+ */
static void radeon_set_filp_rights(struct drm_device *dev,
struct drm_file **owner,
struct drm_file *applier,
@@ -118,6 +152,18 @@ static void radeon_set_filp_rights(struct drm_device *dev,
/*
* Userspace get information ioctl
*/
+/**
+ * radeon_info_ioctl - answer a device specific request.
+ *
+ * @rdev: radeon device pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers. Examples include: pci device id, pipeline params, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -301,16 +347,40 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/*
* Outdated mess for old drm with Xorg being in charge (void function now).
*/
+/**
+ * radeon_driver_firstopen_kms - drm callback for first open
+ *
+ * @dev: drm dev pointer
+ *
+ * Nothing to be done for KMS (all asics).
+ * Returns 0 on success.
+ */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
return 0;
}
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga switcheroo state after last close (all asics).
+ */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
}
+/**
+ * radeon_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init vm on cayman+ (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
@@ -339,6 +409,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return 0;
}
+/**
+ * radeon_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device post close, tear down vm on cayman+ (all asics).
+ */
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -354,6 +432,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
}
}
+/**
+ * radeon_driver_preclose_kms - drm callback for pre close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
+ * (all asics).
+ */
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -367,6 +454,15 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
/*
* VBlank related functions.
*/
+/**
+ * radeon_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
@@ -379,34 +475,70 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
return radeon_get_vblank_counter(rdev, crtc);
}
+/**
+ * radeon_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
+ int r;
if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = true;
-
- return radeon_irq_set(rdev);
+ r = radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ return r;
}
+/**
+ * radeon_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return;
}
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = false;
-
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
+/**
+ * radeon_get_vblank_timestamp_kms - get vblank timestamp
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the timestamp for
+ * @max_error: max error
+ * @vblank_time: time value
+ * @flags: flags passed to the driver
+ *
+ * Gets the timestamp on the requested crtc based on the
+ * scanout position (all asics).
+ * Returns positive status flags on success, negative error on failure.
+ */
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 210317c7045..d5fd615897e 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -990,7 +990,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a0c82229e8f..670e9910f86 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -244,7 +244,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
}
static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5b10ffd7bb2..f380d59c576 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -488,7 +488,7 @@ extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
@@ -678,7 +678,7 @@ void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 830f1a7b486..1f1a4c803c1 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -115,9 +115,7 @@ int radeon_bo_create(struct radeon_device *rdev,
size = ALIGN(size, PAGE_SIZE);
- if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
- }
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
@@ -138,7 +136,6 @@ int radeon_bo_create(struct radeon_device *rdev,
acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
sizeof(struct radeon_bo));
-retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -152,13 +149,15 @@ retry:
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
+
+retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
- mutex_lock(&rdev->vram_mutex);
+ down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, 0, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy);
- mutex_unlock(&rdev->vram_mutex);
+ up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
if (domain == RADEON_GEM_DOMAIN_VRAM) {
@@ -219,9 +218,9 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- mutex_lock(&rdev->vram_mutex);
+ down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- mutex_unlock(&rdev->vram_mutex);
+ up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5b37e283ec3..7ae60660010 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -34,7 +34,6 @@
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
-#define RADEON_WAIT_IDLE_TIMEOUT 200
static const char *radeon_pm_state_type_name[5] = {
"Default",
@@ -251,21 +250,14 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
return;
mutex_lock(&rdev->ddev->struct_mutex);
- mutex_lock(&rdev->vram_mutex);
+ down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) {
- /* wait for GPU idle */
- rdev->pm.gui_idle = false;
- rdev->irq.gui_idle = true;
- radeon_irq_set(rdev);
- wait_event_interruptible_timeout(
- rdev->irq.idle_queue, rdev->pm.gui_idle,
- msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
- rdev->irq.gui_idle = false;
- radeon_irq_set(rdev);
+ /* wait for GPU to become idle */
+ radeon_irq_kms_wait_gui_idle(rdev);
}
} else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -303,7 +295,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
mutex_unlock(&rdev->ring_lock);
- mutex_unlock(&rdev->vram_mutex);
+ up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 983658c9135..ec79b375043 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -35,47 +35,97 @@
#include "atom.h"
/*
- * IB.
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
*/
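In essence, an IB submission is a level of indirection: the packet placed on the main ring only carries the address and length of the real command buffer. A purely illustrative sketch of that relationship, not the hardware packet format:

#include <stdint.h>

struct ib_packet {
	uint64_t gpu_addr;	/* where the IB lives in GPU memory */
	uint32_t length_dw;	/* how many dwords the hw should fetch */
};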
int radeon_debugfs_sa_init(struct radeon_device *rdev);
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size)
{
- int r;
+ int i, r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
- r = radeon_fence_create(rdev, &ib->fence, ring);
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
- dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
- radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
return r;
}
+ ib->ring = ring;
+ ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
ib->vm_id = 0;
ib->is_const_ib = false;
- ib->semaphore = NULL;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ ib->sync_to[i] = NULL;
return 0;
}
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
- int r = 0;
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ bool need_sync = false;
+ int i, r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
@@ -84,17 +134,51 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64);
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
- radeon_fence_emit(rdev, ib->fence);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = ib->sync_to[i];
+ if (radeon_fence_need_sync(fence, ib->ring)) {
+ need_sync = true;
+ radeon_semaphore_sync_rings(rdev, ib->semaphore,
+ fence->ring, ib->ring);
+ radeon_fence_note_sync(fence, ib->ring);
+ }
+ }
+ /* immediately free semaphore when we don't need to sync */
+ if (!need_sync) {
+ radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+ }
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
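A hedged caller sketch for the new two-IB signature; it assumes rdev and the IBs were prepared via radeon_ib_get() as above, and the helper name is invented:

/* submit a DE IB, optionally preceded by a CE (const) IB on SI */
static int submit_ibs(struct radeon_device *rdev,
		      struct radeon_ib *ib, struct radeon_ib *const_ib)
{
	if (const_ib)	/* SI: CE IB goes on the ring before the DE IB */
		return radeon_ib_schedule(rdev, ib, const_ib);
	return radeon_ib_schedule(rdev, ib, NULL);	/* pre-SI: DE only */
}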
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
int r;
@@ -108,6 +192,12 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (r) {
return r;
}
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
@@ -115,24 +205,33 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}
-int radeon_ib_pool_start(struct radeon_device *rdev)
-{
- return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
-}
-
-int radeon_ib_pool_suspend(struct radeon_device *rdev)
-{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
-}
-
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
@@ -164,10 +263,28 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
}
/*
- * Ring.
+ * Rings
+ * Most engines on the GPU are fed via ring buffers. Ring
+ * buffers are areas of GPU accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer)
+ * which determines where the host has written. When the
+ * pointers are equal, the ring is idle. When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr. The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
*/
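The free-space arithmetic that follows from this rptr/wptr scheme, in stand-alone form: with a power-of-two ring, the dwords the host may still write are the distance from wptr forward to rptr, modulo the ring size. A sketch under those assumptions:

#include <stdio.h>

#define RING_DW  1024u		/* ring size in dwords, power of two */
#define PTR_MASK (RING_DW - 1)

/* dwords the host can write before catching up with the GPU's rptr */
static unsigned ring_free_dw(unsigned rptr, unsigned wptr)
{
	unsigned free = (rptr + RING_DW - wptr) & PTR_MASK;
	return free ? free : RING_DW;	/* pointers equal: ring is idle */
}

int main(void)
{
	printf("%u\n", ring_free_dw(0, 0));	/* 1024: idle ring */
	printf("%u\n", ring_free_dw(10, 500));	/* 534: GPU trailing */
	return 0;
}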
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
@@ -181,21 +298,37 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
ring->ring_free_dw--;
}
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
+/**
+ * radeon_ring_supports_scratch_reg - check if the ring supports
+ * writing to scratch registers
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if a specific ring supports writing to scratch registers (all asics).
+ * Returns true if the ring supports writing to scratch regs, false if not.
+ */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
- /* r1xx-r5xx only has CP ring */
- if (rdev->family < CHIP_R600)
- return RADEON_RING_TYPE_GFX_INDEX;
-
- if (rdev->family >= CHIP_CAYMAN) {
- if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
- return CAYMAN_RING_TYPE_CP1_INDEX;
- else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
- return CAYMAN_RING_TYPE_CP2_INDEX;
+ switch (ring->idx) {
+ case RADEON_RING_TYPE_GFX_INDEX:
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ return true;
+ default:
+ return false;
}
- return RADEON_RING_TYPE_GFX_INDEX;
}
+/**
+ * radeon_ring_free_size - update the free size
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
@@ -214,7 +347,16 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
}
}
-
+/**
+ * radeon_ring_alloc - allocate space on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
@@ -227,7 +369,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
+ r = radeon_fence_wait_next_locked(rdev, ring->idx);
if (r)
return r;
}
@@ -236,6 +378,17 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
return 0;
}
+/**
+ * radeon_ring_lock - lock the ring and allocate space on it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
@@ -249,15 +402,20 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
return 0;
}
+/**
+ * radeon_ring_commit - tell the GPU to execute the new
+ * commands on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- unsigned count_dw_pad;
- unsigned i;
-
/* We pad to match fetch size */
- count_dw_pad = (ring->align_mask + 1) -
- (ring->wptr & ring->align_mask);
- for (i = 0; i < count_dw_pad; i++) {
+ while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
@@ -265,23 +423,55 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
(void)RREG32(ring->wptr_reg);
}
+/**
+ * radeon_ring_unlock_commit - tell the GPU to execute the new
+ * commands on the ring buffer and unlock it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_commit() then unlock the ring (all asics).
+ */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_ring_undo - reset the wptr
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
}
+/**
+ * radeon_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_undo() then unlock the ring (all asics).
+ */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_undo(ring);
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_ring_force_activity - add some nop packets to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Add some nop packets to the ring to force activity (all asics).
+ * Used for lockup detection to see if the rptr is advancing.
+ */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
@@ -296,6 +486,13 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
}
}
+/**
+ * radeon_ring_lockup_update - update lockup variables
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
ring->last_rptr = ring->rptr;
@@ -349,6 +546,116 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return false;
}
+/**
+ * radeon_ring_backup - Back up the content of a ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the ring we want to back up
+ * @data: place to store the pointer to the saved ring content
+ *
+ * Saves all unprocessed commands from a ring, returns the number of dwords saved.
+ */
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+ uint32_t **data)
+{
+ unsigned size, ptr, i;
+
+ /* just in case lock the ring */
+ mutex_lock(&rdev->ring_lock);
+ *data = NULL;
+
+ if (ring->ring_obj == NULL) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* it doesn't make sense to save anything if all fences are signaled */
+ if (!radeon_fence_count_emitted(rdev, ring->idx)) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* calculate the number of dw on the ring */
+ if (ring->rptr_save_reg)
+ ptr = RREG32(ring->rptr_save_reg);
+ else if (rdev->wb.enabled)
+ ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+ else {
+ /* no way to read back the next rptr */
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ size = ring->wptr + (ring->ring_size / 4);
+ size -= ptr;
+ size &= ring->ptr_mask;
+ if (size == 0) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* and then save the content of the ring */
+ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+ if (!*data) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+ for (i = 0; i < size; ++i) {
+ (*data)[i] = ring->ring[ptr++];
+ ptr &= ring->ptr_mask;
+ }
+
+ mutex_unlock(&rdev->ring_lock);
+ return size;
+}
+
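The size computation in radeon_ring_backup() handles the wrap-around case with the same mask trick used elsewhere in the ring code; a stand-alone illustration with made-up pointer values:

#include <stdio.h>

#define RING_DW  1024u
#define PTR_MASK (RING_DW - 1)

int main(void)
{
	unsigned ptr = 1000, wptr = 24;	/* unprocessed data wraps past the end */
	unsigned size = (wptr + RING_DW - ptr) & PTR_MASK;
	printf("%u dwords to save\n", size);	/* 48 */
	return 0;
}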
+/**
+ * radeon_ring_restore - append saved commands to the ring again
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restores the previously saved commands.
+ */
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned size, uint32_t *data)
+{
+ int i, r;
+
+ if (!size || !data)
+ return 0;
+
+ /* restore the saved ring content */
+ r = radeon_ring_lock(rdev, ring, size);
+ if (r)
+ return r;
+
+ for (i = 0; i < size; ++i) {
+ radeon_ring_write(ring, data[i]);
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ kfree(data);
+ return 0;
+}
+
+/**
+ * radeon_ring_init - init driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @rptr_reg: MMIO offset of the rptr register
+ * @wptr_reg: MMIO offset of the wptr register
+ * @ptr_reg_shift: bit offset of the rptr/wptr values
+ * @ptr_reg_mask: bit mask of the rptr/wptr values
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
@@ -391,12 +698,25 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
+ if (rdev->wb.enabled) {
+ u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+ ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+ ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+ }
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
return 0;
}
+/**
+ * radeon_ring_fini - tear down the driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
@@ -438,6 +758,10 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
count = (ring->ring_size / 4) - ring->ring_free_dw;
seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ if (ring->rptr_save_reg) {
+ seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
+ RREG32(ring->rptr_save_reg));
+ }
seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 32059b74572..4e771240fdd 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -54,7 +54,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
{
int i, r;
- spin_lock_init(&sa_manager->lock);
+ init_waitqueue_head(&sa_manager->wq);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
@@ -211,6 +211,39 @@ static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
return false;
}
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check whether there is a fence we can wait for, or whether there is
+ * enough free memory to satisfy the allocation directly
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!list_empty(&sa_manager->flist[i])) {
+ return true;
+ }
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ return true;
+ }
+
+ return false;
+}
+
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
struct radeon_fence **fences,
unsigned *tries)
@@ -297,8 +330,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
- spin_lock(&sa_manager->lock);
- do {
+ spin_lock(&sa_manager->wq.lock);
+ while (1) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
tries[i] = 0;
@@ -309,30 +342,34 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
- spin_unlock(&sa_manager->lock);
+ spin_unlock(&sa_manager->wq.lock);
return 0;
}
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
- if (block) {
- spin_unlock(&sa_manager->lock);
- r = radeon_fence_wait_any(rdev, fences, false);
- spin_lock(&sa_manager->lock);
- if (r) {
- /* if we have nothing to wait for we
- are practically out of memory */
- if (r == -ENOENT) {
- r = -ENOMEM;
- }
- goto out_err;
- }
+ if (!block) {
+ break;
+ }
+
+ spin_unlock(&sa_manager->wq.lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->wq.lock);
+ /* if we have nothing to wait for, block */
+ if (r == -ENOENT) {
+ r = wait_event_interruptible_locked(
+ sa_manager->wq,
+ radeon_sa_event(sa_manager, size, align)
+ );
+ }
+ if (r) {
+ goto out_err;
}
- } while (block);
+ }
out_err:
- spin_unlock(&sa_manager->lock);
+ spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
@@ -348,15 +385,16 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
}
sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->lock);
- if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ spin_lock(&sa_manager->wq.lock);
+ if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
&sa_manager->flist[fence->ring]);
} else {
radeon_sa_bo_remove_locked(*sa_bo);
}
- spin_unlock(&sa_manager->lock);
+ wake_up_all_locked(&sa_manager->wq);
+ spin_unlock(&sa_manager->wq.lock);
*sa_bo = NULL;
}
@@ -366,7 +404,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
{
struct radeon_sa_bo *i;
- spin_lock(&sa_manager->lock);
+ spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
@@ -381,6 +419,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
}
seq_printf(m, "\n");
}
- spin_unlock(&sa_manager->lock);
+ spin_unlock(&sa_manager->wq.lock);
}
#endif
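
The sa_manager conversion above swaps a private spinlock for the waitqueue's own lock, so a blocked allocator can sleep on the very lock that protects the free lists. A generic, radeon-free sketch of that kernel pattern (wait_event_interruptible_locked() drops wq.lock while sleeping and retakes it before returning):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static bool resource_free;

static int take_resource(void)			/* consumer */
{
	int r;

	spin_lock(&wq.lock);
	/* sleeps with wq.lock dropped, returns with it held again */
	r = wait_event_interruptible_locked(wq, resource_free);
	if (r == 0)
		resource_free = false;
	spin_unlock(&wq.lock);
	return r;
}

static void give_resource(void)			/* producer */
{
	spin_lock(&wq.lock);
	resource_free = true;
	wake_up_all_locked(&wq);	/* wake while still holding wq.lock */
	spin_unlock(&wq.lock);
}
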
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index e2ace5dce11..7cc78de6ddc 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -68,70 +68,49 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
+/* caller must hold ring lock */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- bool sync_to[RADEON_NUM_RINGS],
- int dst_ring)
+ int signaler, int waiter)
{
- int i = 0, r;
+ int r;
- mutex_lock(&rdev->ring_lock);
- r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
- if (r) {
- goto error;
+ /* no need to signal and wait on the same ring */
+ if (signaler == waiter) {
+ return 0;
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (!sync_to[i] || i == dst_ring)
- continue;
-
- /* prevent GPU deadlocks */
- if (!rdev->ring[i].ready) {
- dev_err(rdev->dev, "Trying to sync to a disabled ring!");
- r = -EINVAL;
- goto error;
- }
-
- r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
- if (r) {
- goto error;
- }
-
- radeon_semaphore_emit_signal(rdev, i, semaphore);
- radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[signaler].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!\n");
+ return -EINVAL;
+ }
- radeon_ring_commit(rdev, &rdev->ring[i]);
+ r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
+ if (r) {
+ return r;
}
+ radeon_semaphore_emit_signal(rdev, signaler, semaphore);
+ radeon_ring_commit(rdev, &rdev->ring[signaler]);
- radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
- mutex_unlock(&rdev->ring_lock);
+ /* we assume caller has already allocated space on waiter's ring */
+ radeon_semaphore_emit_wait(rdev, waiter, semaphore);
return 0;
-
-error:
- /* unlock all locks taken so far */
- for (--i; i >= 0; --i) {
- if (sync_to[i] || i == dst_ring) {
- radeon_ring_undo(&rdev->ring[i]);
- }
- }
- radeon_ring_undo(&rdev->ring[dst_ring]);
- mutex_unlock(&rdev->ring_lock);
- return r;
}
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
+ struct radeon_semaphore **semaphore,
struct radeon_fence *fence)
{
- if (semaphore == NULL) {
+ if (semaphore == NULL || *semaphore == NULL) {
return;
}
- if (semaphore->waiters > 0) {
+ if ((*semaphore)->waiters > 0) {
dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
- " hardware lockup imminent!\n", semaphore);
+ " hardware lockup imminent!\n", *semaphore);
}
- radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
- kfree(semaphore);
+ radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+ kfree(*semaphore);
+ *semaphore = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index efff929ea49..7c16540c10f 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -106,13 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
- goto out_cleanup;
- }
-
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
+ r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -155,13 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
- goto out_cleanup;
- }
-
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
+ r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -241,36 +229,33 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
{
struct radeon_fence *fence1 = NULL, *fence2 = NULL;
struct radeon_semaphore *semaphore = NULL;
- int ridxA = radeon_ring_index(rdev, ringA);
- int ridxB = radeon_ring_index(rdev, ringB);
int r;
- r = radeon_fence_create(rdev, &fence1, ridxA);
+ r = radeon_semaphore_create(rdev, &semaphore);
if (r) {
- DRM_ERROR("Failed to create sync fence 1\n");
+ DRM_ERROR("Failed to create semaphore\n");
goto out_cleanup;
}
- r = radeon_fence_create(rdev, &fence2, ridxA);
+
+ r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
goto out_cleanup;
}
-
- r = radeon_semaphore_create(rdev, &semaphore);
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fence1, ringA->idx);
if (r) {
- DRM_ERROR("Failed to create semaphore\n");
+ DRM_ERROR("Failed to emit fence 1\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
-
- r = radeon_ring_lock(rdev, ringA, 64);
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fence2, ringA->idx);
if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ DRM_ERROR("Failed to emit fence 2\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fence1);
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fence2);
radeon_ring_unlock_commit(rdev, ringA);
mdelay(1000);
@@ -285,7 +270,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringB);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB);
r = radeon_fence_wait(fence1, false);
@@ -306,7 +291,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringB);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB);
r = radeon_fence_wait(fence2, false);
@@ -316,8 +301,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
}
out_cleanup:
- if (semaphore)
- radeon_semaphore_free(rdev, semaphore, NULL);
+ radeon_semaphore_free(rdev, &semaphore, NULL);
if (fence1)
radeon_fence_unref(&fence1);
@@ -336,23 +320,9 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
{
struct radeon_fence *fenceA = NULL, *fenceB = NULL;
struct radeon_semaphore *semaphore = NULL;
- int ridxA = radeon_ring_index(rdev, ringA);
- int ridxB = radeon_ring_index(rdev, ringB);
- int ridxC = radeon_ring_index(rdev, ringC);
bool sigA, sigB;
int i, r;
- r = radeon_fence_create(rdev, &fenceA, ridxA);
- if (r) {
- DRM_ERROR("Failed to create sync fence 1\n");
- goto out_cleanup;
- }
- r = radeon_fence_create(rdev, &fenceB, ridxB);
- if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
- goto out_cleanup;
- }
-
r = radeon_semaphore_create(rdev, &semaphore);
if (r) {
DRM_ERROR("Failed to create semaphore\n");
@@ -361,20 +331,30 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
+ if (r) {
+ DRM_ERROR("Failed to emit sync fence 1\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fenceA);
radeon_ring_unlock_commit(rdev, ringA);
r = radeon_ring_lock(rdev, ringB, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+ DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ radeon_ring_unlock_undo(rdev, ringB);
goto out_cleanup;
}
- radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
- radeon_fence_emit(rdev, fenceB);
radeon_ring_unlock_commit(rdev, ringB);
mdelay(1000);
@@ -393,7 +373,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringC);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringC);
for (i = 0; i < 30; ++i) {
@@ -419,7 +399,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringC);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringC);
mdelay(1000);
@@ -436,8 +416,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
}
out_cleanup:
- if (semaphore)
- radeon_semaphore_free(rdev, semaphore, NULL);
+ radeon_semaphore_free(rdev, &semaphore, NULL);
if (fenceA)
radeon_fence_unref(&fenceA);
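
The radeon_test rework above reflects the new fence lifecycle: fences are no longer pre-created and attached later, they come into existence at emit time, bound to a ring. A compact sketch of the new flow, using the helpers exactly as they appear in the hunks above:

static int example_fenced_submit(struct radeon_device *rdev,
				 struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_ring_lock(rdev, ring, 64);
	if (r)
		return r;

	/* ... queue the actual packets here ... */

	r = radeon_fence_emit(rdev, &fence, ring->idx); /* allocates the fence */
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);	/* roll back on failure */
		return r;
	}
	radeon_ring_unlock_commit(rdev, ring);

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
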
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c94a2257761..5b71c716d83 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,15 +222,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
- struct radeon_fence *fence, *old_fence;
- struct radeon_semaphore *sem = NULL;
- int r;
+ struct radeon_fence *fence;
+ int r, ridx;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
- if (unlikely(r)) {
- return r;
- }
+ ridx = radeon_copy_ring_index(rdev);
old_start = old_mem->start << PAGE_SHIFT;
new_start = new_mem->start << PAGE_SHIFT;
@@ -243,7 +239,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- radeon_fence_unref(&fence);
return -EINVAL;
}
switch (new_mem->mem_type) {
@@ -255,46 +250,23 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- radeon_fence_unref(&fence);
return -EINVAL;
}
- if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
+ if (!rdev->ring[ridx].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
- radeon_fence_unref(&fence);
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- old_fence = bo->sync_obj;
- if (old_fence && old_fence->ring != fence->ring
- && !radeon_fence_signaled(old_fence)) {
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- sync_to_ring[old_fence->ring] = true;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- radeon_fence_unref(&fence);
- return r;
- }
-
- r = radeon_semaphore_sync_rings(rdev, sem,
- sync_to_ring, fence->ring);
- if (r) {
- radeon_semaphore_free(rdev, sem, NULL);
- radeon_fence_unref(&fence);
- return r;
- }
- }
-
+ fence = bo->sync_obj;
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
- fence);
+ &fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
- radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence);
return r;
}
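
Note the shape of the new copy call above: the buffer's last fence goes in through the same pointer that the copy's own fence comes back out of, which is what lets the explicit semaphore dance be deleted (an inference from this hunk; the radeon_copy() internals are outside this excerpt). Sketch:

static int example_blit(struct radeon_device *rdev, uint64_t src, uint64_t dst,
			unsigned num_gpu_pages, struct radeon_fence *last)
{
	struct radeon_fence *fence = last;	/* sync target, may be NULL */
	int r;

	r = radeon_copy(rdev, src, dst, num_gpu_pages, &fence);
	if (r)
		return r;

	/* on success 'fence' now fences this copy, not the old work */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
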
@@ -762,9 +734,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
- if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
- }
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
r = radeon_ttm_debugfs_init(rdev);
if (r) {
@@ -825,9 +795,9 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
rdev = radeon_get_rdev(bo->bdev);
- mutex_lock(&rdev->vram_mutex);
+ down_read(&rdev->pm.mclk_lock);
r = ttm_vm_ops->fault(vma, vmf);
- mutex_unlock(&rdev->vram_mutex);
+ up_read(&rdev->pm.mclk_lock);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a464eb5e2df..2752f7f7823 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -426,13 +426,11 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
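
The same reshuffle repeats for every ASIC in this series (rs600, rs690, rv515, rv770 and si below): IB pool setup moves out of *_init() into *_startup() so that the resume path re-runs it, and init simply marks acceleration working before attempting startup. Generic shape, sketched with the helpers named above:

static int example_startup(struct radeon_device *rdev)
{
	int r;

	/* ... CP, writeback and IRQs are brought up first ... */

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}
	return 0;
}

static int example_init(struct radeon_device *rdev)
{
	rdev->accel_working = true;
	if (example_startup(rdev)) {
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		radeon_ib_pool_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
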
@@ -470,7 +468,6 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -482,7 +479,7 @@ void rs400_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -550,20 +547,14 @@ int rs400_init(struct radeon_device *rdev)
return r;
r300_set_reg_safe(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs400_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index e95c5e61d4e..5301b3df846 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -294,6 +294,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -301,26 +302,25 @@ void rs600_hpd_init(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
- rdev->irq.hpd[1] = true;
break;
default:
break;
}
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- rs600_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -328,17 +328,17 @@ void rs600_hpd_fini(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
- rdev->irq.hpd[1] = false;
break;
default:
break;
}
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
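
rs600_hpd_init()/fini() above trade per-pin irq bookkeeping for a single mask handed to the irq_kms layer. Sketch of the pattern, assuming radeon_irq_kms_enable_hpd()/disable_hpd() take a bitmask of hpd pins, as the call sites suggest:

static void example_hpd_init(struct radeon_device *rdev)
{
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector,
			    &rdev->ddev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector =
			to_radeon_connector(connector);

		/* ... program this pin's DC_HOT_PLUG_DETECTx register ... */
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
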
int rs600_asic_reset(struct radeon_device *rdev)
@@ -564,18 +564,18 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
@@ -686,7 +686,6 @@ int rs600_irq_process(struct radeon_device *rdev)
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
@@ -696,7 +695,7 @@ int rs600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -705,7 +704,7 @@ int rs600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -908,13 +907,11 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -956,7 +953,6 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -970,7 +966,7 @@ void rs600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1038,20 +1034,14 @@ int rs600_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs600_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 159b6a43fda..3b663fcfe06 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,13 +637,11 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -685,7 +683,6 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -699,7 +696,7 @@ void rs690_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -768,20 +765,14 @@ int rs690_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs690_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7f08cedb533..a12fbcc8ccb 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -408,13 +408,11 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -469,7 +467,7 @@ void rv515_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
@@ -543,20 +541,14 @@ int rv515_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv515_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b4f51c569c3..ca8ffec10ff 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -358,8 +358,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r700_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
/*
@@ -951,13 +953,11 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -994,9 +994,6 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
- /* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
@@ -1076,20 +1073,14 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -1104,7 +1095,7 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -1121,6 +1112,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
+ u32 mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -1135,6 +1128,15 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & DRM_PCIE_SPEED_50))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
/* advertise upconfig capability */
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
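
The gen2 bring-up above is now gated on what the PCIe link actually supports: query the DRM core for the speed capability mask and bail unless 5.0 GT/s is on offer. The check in isolation, using the constants from the drmP.h hunk near the end of this diff:

static bool example_pcie_gen2_ok(struct drm_device *ddev)
{
	u32 mask;

	if (drm_pcie_get_speed_cap_mask(ddev, &mask) != 0)
		return false;			/* capability unreadable */

	return (mask & DRM_PCIE_SPEED_50) != 0;	/* 5.0 GT/s supported */
}
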
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 0b0279291a7..c053f819377 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1762,13 +1762,34 @@ void si_fence_ring_emit(struct radeon_device *rdev,
*/
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 header;
- if (ib->is_const_ib)
+ if (ib->is_const_ib) {
+ /* set switch buffer packet before const IB */
+ radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ radeon_ring_write(ring, 0);
+
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
- else
+ } else {
+ u32 next_rptr;
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (1 << 8));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+ }
radeon_ring_write(ring, header);
radeon_ring_write(ring,
@@ -1779,18 +1800,20 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
- /* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm_id);
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
- PACKET3_TC_ACTION_ENA |
- PACKET3_SH_KCACHE_ACTION_ENA |
- PACKET3_SH_ICACHE_ACTION_ENA);
- radeon_ring_write(ring, 0xFFFFFFFF);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ if (!ib->is_const_ib) {
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ }
}
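
The offsets fed into next_rptr above decode as packet lengths; this is a reading of the dword counts visible in the code, not authoritative hardware documentation:

/*
 * next_rptr = wptr + (rptr-write packet) + (IB packet) + (GART flush)
 *
 *   SET_CONFIG_REG write:  header + reg + value        = 3 dwords -> +3
 *   WRITE_DATA write:      header + ctl + lo + hi + v  = 5 dwords -> +5
 *   INDIRECT_BUFFER:       header + 3 payload dwords   = 4 dwords -> +4
 *   flush (SET_CONFIG_REG + SURFACE_SYNC): 3 + 5       = 8 dwords -> +8
 *
 * so the CP publishes, ahead of time, the rptr it will have reached
 * once this entire dispatch sequence has been consumed.
 */
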
/*
@@ -1917,10 +1940,20 @@ static int si_cp_start(struct radeon_device *rdev)
static void si_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
si_cp_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
static int si_cp_resume(struct radeon_device *rdev)
@@ -2702,7 +2735,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
- switch (ib->fence->ring) {
+ switch (ib->ring) {
case RADEON_RING_TYPE_GFX_INDEX:
ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
break;
@@ -2711,7 +2744,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
break;
default:
- dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring);
+ dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
ret = -EINVAL;
break;
}
@@ -2942,7 +2975,6 @@ static void si_disable_interrupts(struct radeon_device *rdev)
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
- rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
@@ -3093,45 +3125,45 @@ int si_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
/* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("si_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
- rdev->irq.pflip[2]) {
+ atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("si_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
- rdev->irq.pflip[3]) {
+ atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("si_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
- rdev->irq.pflip[4]) {
+ atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("si_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
- rdev->irq.pflip[5]) {
+ atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("si_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -3359,29 +3391,27 @@ int si_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data, ring_id;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
rptr = rdev->ih.rptr;
DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
-restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
si_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -3399,7 +3429,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -3425,7 +3455,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -3451,7 +3481,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[2])
+ if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -3477,7 +3507,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[3])
+ if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -3503,7 +3533,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[4])
+ if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -3529,7 +3559,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[5])
+ if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
@@ -3620,7 +3650,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -3632,15 +3661,17 @@ restart_ih:
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = si_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = si_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
return IRQ_HANDLED;
}
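
si_irq_process() above drops the spinlock in favour of an atomic_xchg() election plus a post-release recheck of wptr. The pattern in isolation (a sketch; 'drain' stands in for the big event switch):

static irqreturn_t example_ih_process(atomic_t *lock, u32 *rptr,
				      u32 (*get_wptr)(void),
				      void (*drain)(u32 *rptr, u32 wptr))
{
	u32 wptr = get_wptr();

restart:
	if (atomic_xchg(lock, 1))
		return IRQ_NONE;	/* another CPU already owns the ring */

	drain(rptr, wptr);		/* consume entries up to wptr */
	atomic_set(lock, 0);

	wptr = get_wptr();		/* anything land while we drained? */
	if (wptr != *rptr)
		goto restart;

	return IRQ_HANDLED;
}
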
@@ -3752,35 +3783,18 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d) on CP ring 0\n", r);
- rdev->accel_working = false;
- return r;
- }
-
- r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+ r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed testing IB (%d) on CP ring 1\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
- r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+ r = radeon_vm_manager_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed testing IB (%d) on CP ring 2\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
return r;
}
- r = radeon_vm_manager_start(rdev);
- if (r)
- return r;
-
return 0;
}
@@ -3809,12 +3823,6 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
- /* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- radeon_vm_manager_suspend(rdev);
-#if 0
- r600_blit_suspend(rdev);
-#endif
si_cp_enable(rdev, false);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
@@ -3903,17 +3911,7 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
- r = radeon_vm_manager_init(rdev);
- if (r) {
- dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
- }
-
r = si_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
@@ -3921,7 +3919,7 @@ int si_init(struct radeon_device *rdev)
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
si_pcie_gart_fini(rdev);
@@ -3950,7 +3948,7 @@ void si_fini(struct radeon_device *rdev)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index db406796286..7869089e876 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -901,5 +901,6 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SET_CE_DE_COUNTERS 0x89
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
#endif
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 6eb507a5d13..1efbb907583 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1050,6 +1050,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
+ int release_idlelock = 0;
int i;
if (!dma)
@@ -1059,7 +1060,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
if (!dma->buflist)
return;
- /*i830_flush_queue(dev); */
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ release_idlelock = 1;
+ }
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
@@ -1075,7 +1079,8 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
}
- drm_core_reclaim_buffers(dev, file_priv);
+ if (release_idlelock)
+ drm_idlelock_release(&file_priv->master->lock);
}
struct drm_ioctl_desc savage_ioctls[] = {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 89afe0b8364..d31d4cca9a4 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -52,9 +52,9 @@ static struct drm_driver driver = {
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.load = savage_driver_load,
.firstopen = savage_driver_firstopen,
+ .preclose = savage_reclaim_buffers,
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
- .reclaim_buffers = savage_reclaim_buffers,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = &savage_driver_fops,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index dd14cd1a003..7f119870147 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -105,10 +105,9 @@ static struct drm_driver driver = {
.load = sis_driver_load,
.unload = sis_driver_unload,
.open = sis_driver_open,
+ .preclose = sis_reclaim_buffers_locked,
.postclose = sis_driver_postclose,
.dma_quiescent = sis_idle,
- .reclaim_buffers = NULL,
- .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
.fops = &sis_driver_fops,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index dd4a316c3d7..2c231070d25 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -74,7 +74,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+ DRM_DEBUG("offset = %lu, size = %lu\n", fb->offset, fb->size);
return 0;
}
@@ -161,7 +161,7 @@ fail_alloc:
mem->size = 0;
mem->free = 0;
- DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
+ DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size,
mem->offset);
return retval;
@@ -215,7 +215,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+ DRM_DEBUG("offset = %lu, size = %lu\n", agp->offset, agp->size);
return 0;
}
@@ -321,14 +321,20 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
struct sis_file_private *file_priv = file->driver_priv;
struct sis_memblock *entry, *next;
+ if (!(file->minor->master && file->master->lock.hw_lock))
+ return;
+
+ drm_idlelock_take(&file->master->lock);
+
mutex_lock(&dev->struct_mutex);
if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
+ drm_idlelock_release(&file->master->lock);
+
return;
}
- if (dev->driver->dma_quiescent)
- dev->driver->dma_quiescent(dev);
+ sis_idle(dev);
list_for_each_entry_safe(entry, next, &file_priv->obj_list,
@@ -343,6 +349,9 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
kfree(entry);
}
mutex_unlock(&dev->struct_mutex);
+
+ drm_idlelock_release(&file->master->lock);
+
return;
}
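
savage, sis and via all converge on the same replacement for the deleted reclaim_buffers hooks: a ->preclose handler that takes the idlelock itself. The common shape:

static void example_preclose(struct drm_device *dev, struct drm_file *file)
{
	/* nothing to do without a master holding a hardware lock */
	if (!(file->minor->master && file->master->lock.hw_lock))
		return;

	drm_idlelock_take(&file->master->lock);
	/* ... quiesce DMA and free this client's allocations ... */
	drm_idlelock_release(&file->master->lock);
}
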
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 1613c78544c..90f6b13acfa 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -54,7 +54,6 @@ static const struct file_operations tdfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
- .reclaim_buffers = drm_core_reclaim_buffers,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 56e75f0f1df..0731ab2e6c0 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -27,7 +27,7 @@ static void udl_encoder_disable(struct drm_encoder *encoder)
}
static bool udl_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 0d7816789da..f5dd89e891d 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -45,12 +45,25 @@ static char *udl_vidreg_unlock(char *buf)
* 0x01 H and V sync off (screen blank but powered)
* 0x07 DPMS powerdown (requires modeset to come back)
*/
-static char *udl_enable_hvsync(char *buf, bool enable)
+static char *udl_set_blank(char *buf, int dpms_mode)
{
- if (enable)
- return udl_set_register(buf, 0x1F, 0x00);
- else
- return udl_set_register(buf, 0x1F, 0x07);
+ u8 reg;
+ switch (dpms_mode) {
+ case DRM_MODE_DPMS_OFF:
+ reg = 0x07;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ reg = 0x05;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ reg = 0x01;
+ break;
+ case DRM_MODE_DPMS_ON:
+ reg = 0x00;
+ break;
+ }
+
+ return udl_set_register(buf, 0x1f, reg);
}
static char *udl_set_color_depth(char *buf, u8 selection)
@@ -199,6 +212,20 @@ static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
return wrptr;
}
+static char *udl_dummy_render(char *wrptr)
+{
+ *wrptr++ = 0xAF;
+ *wrptr++ = 0x6A; /* copy */
+ *wrptr++ = 0x00; /* from addr */
+ *wrptr++ = 0x00;
+ *wrptr++ = 0x00;
+ *wrptr++ = 0x01; /* one pixel */
+ *wrptr++ = 0x00; /* to address */
+ *wrptr++ = 0x00;
+ *wrptr++ = 0x00;
+ return wrptr;
+}
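
udl_dummy_render() blits a single pixel from address 0 to address 0; the apparent point, inferred from the call sites below rather than documented anywhere in this diff, is that the device only latches register writes once it processes a render command, so every vidreg sequence is now chased with this no-op. The resulting command-buffer assembly:

/* hedged sketch of a dpms control URB, mirroring udl_crtc_dpms() below */
char *buf = (char *)urb->transfer_buffer;

buf = udl_vidreg_lock(buf);
buf = udl_set_blank(buf, DRM_MODE_DPMS_OFF);	/* reg 0x1f <- 0x07 */
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);			/* force the writes to latch */
retval = udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
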
+
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -235,9 +262,10 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
buf = (char *)urb->transfer_buffer;
buf = udl_vidreg_lock(buf);
- buf = udl_enable_hvsync(buf, false);
+ buf = udl_set_blank(buf, mode);
buf = udl_vidreg_unlock(buf);
+ buf = udl_dummy_render(buf);
retval = udl_submit_urb(dev, urb, buf - (char *)
urb->transfer_buffer);
} else {
@@ -251,7 +279,7 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -306,9 +334,11 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
- wrptr = udl_enable_hvsync(wrptr, true);
+ wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
wrptr = udl_vidreg_unlock(wrptr);
+ wrptr = udl_dummy_render(wrptr);
+
ufb->active_16 = true;
if (old_fb) {
struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 02661f35f7a..e927b4c052f 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -75,6 +75,7 @@ static struct drm_driver driver = {
.load = via_driver_load,
.unload = via_driver_unload,
.open = via_driver_open,
+ .preclose = via_reclaim_buffers_locked,
.postclose = via_driver_postclose,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
@@ -85,9 +86,6 @@ static struct drm_driver driver = {
.irq_uninstall = via_driver_irq_uninstall,
.irq_handler = via_driver_irq_handler,
.dma_quiescent = via_driver_dma_quiescent,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .reclaim_buffers_locked = NULL,
- .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
.ioctls = via_ioctls,
.fops = &via_driver_fops,
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a3574d09a07..acfcb358e7b 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -215,14 +215,20 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
struct via_file_private *file_priv = file->driver_priv;
struct via_memblock *entry, *next;
+ if (!(file->minor->master && file->master->lock.hw_lock))
+ return;
+
+ drm_idlelock_take(&file->master->lock);
+
mutex_lock(&dev->struct_mutex);
if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
+ drm_idlelock_release(&file->master->lock);
+
return;
}
- if (dev->driver->dma_quiescent)
- dev->driver->dma_quiescent(dev);
+ via_driver_dma_quiescent(dev);
list_for_each_entry_safe(entry, next, &file_priv->obj_list,
owner_list) {
@@ -231,5 +237,8 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
kfree(entry);
}
mutex_unlock(&dev->struct_mutex);
+
+ drm_idlelock_release(&file->master->lock);
+
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ee24d216aa8..4d9edead01a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -769,10 +769,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
goto out_no_tfile;
file_priv->driver_priv = vmw_fp;
-
- if (unlikely(dev_priv->bdev.dev_mapping == NULL))
- dev_priv->bdev.dev_mapping =
- file_priv->filp->f_path.dentry->d_inode->i_mapping;
+ dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
@@ -1147,7 +1144,6 @@ static struct drm_driver driver = {
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
- .reclaim_buffers_locked = NULL,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
.dma_quiescent = NULL, /*vmw_dma_quiescent, */
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 8b864afb40b..62e0022561b 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -60,7 +60,7 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
index 06c52cb62d2..31c735d3921 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -48,7 +48,7 @@ static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
}
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 31ad880ca2e..d6b67bb9075 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -348,7 +348,6 @@ struct drm_buf {
struct drm_buf *next; /**< Kernel-only: used for free list */
__volatile__ int waiting; /**< On kernel DMA queue */
__volatile__ int pending; /**< On hardware DMA queue */
- wait_queue_head_t dma_wait; /**< Processes waiting */
struct drm_file *file_priv; /**< Private of holding file descr */
int context; /**< Kernel queue for this buffer */
int while_locked; /**< Dispatch this buffer while locked */
@@ -876,12 +875,6 @@ struct drm_driver {
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
- void (*reclaim_buffers) (struct drm_device *dev,
- struct drm_file * file_priv);
- void (*reclaim_buffers_locked) (struct drm_device *dev,
- struct drm_file *file_priv);
- void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
- struct drm_file *file_priv);
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
@@ -1108,12 +1101,8 @@ struct drm_device {
/*@} */
- /** \name DMA queues (contexts) */
+ /** \name DMA support */
/*@{ */
- int queue_count; /**< Number of active DMA queues */
- int queue_reserved; /**< Number of reserved DMA queues */
- int queue_slots; /**< Actual length of queuelist */
- struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
@@ -1540,7 +1529,6 @@ extern int drm_debugfs_cleanup(struct drm_minor *minor);
/* Info file support */
extern int drm_name_info(struct seq_file *m, void *data);
extern int drm_vm_info(struct seq_file *m, void *data);
-extern int drm_queues_info(struct seq_file *m, void *data);
extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
@@ -1761,6 +1749,11 @@ extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
+#define DRM_PCIE_SPEED_25 1
+#define DRM_PCIE_SPEED_50 2
+#define DRM_PCIE_SPEED_80 4
+
+extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index bac55c21511..a1a0386e016 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -676,8 +676,6 @@ struct drm_plane {
* This is used to set modes.
*/
struct drm_mode_set {
- struct list_head head;
-
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 7988e55c98d..e01cc80c9c3 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -62,7 +62,7 @@ struct drm_crtc_helper_funcs {
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -96,7 +96,7 @@ struct drm_encoder_helper_funcs {
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index 2f65633d28a..7dc38523380 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -54,7 +54,7 @@ struct drm_encoder_slave_funcs {
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
int (*mode_valid)(struct drm_encoder *encoder,
struct drm_display_mode *mode);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 564b14aa7e1..06d7f798a08 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -50,6 +50,7 @@ struct drm_mm_node {
unsigned scanned_next_free : 1;
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
+ unsigned long color;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
@@ -66,6 +67,7 @@ struct drm_mm {
spinlock_t unused_lock;
unsigned int scan_check_range : 1;
unsigned scan_alignment;
+ unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
@@ -73,6 +75,9 @@ struct drm_mm {
unsigned long scan_start;
unsigned long scan_end;
struct drm_mm_node *prev_scanned_node;
+
+ void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
+ unsigned long *start, unsigned long *end);
};
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
@@ -100,11 +105,13 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end,
int atomic);
@@ -112,13 +119,13 @@ static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
- return drm_mm_get_block_generic(parent, size, alignment, 0);
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
- return drm_mm_get_block_generic(parent, size, alignment, 1);
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
@@ -127,8 +134,19 @@ static inline struct drm_mm_node *drm_mm_get_block_range(
unsigned long start,
unsigned long end)
{
- return drm_mm_get_block_range_generic(parent, size, alignment,
- start, end, 0);
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+ start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_color_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment, color,
+ start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
@@ -137,7 +155,7 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
unsigned long start,
unsigned long end)
{
- return drm_mm_get_block_range_generic(parent, size, alignment,
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 1);
}
extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
@@ -149,18 +167,59 @@ extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- int best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range(
+extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match);
+static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ bool best_match)
+{
+ return drm_mm_search_free_generic(mm, size, alignment, 0, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
- int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ bool best_match)
+{
+ return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
+ start, end, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match)
+{
+ return drm_mm_search_free_generic(mm, size, alignment, color, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
+{
+ return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
+ start, end, best_match);
+}
+extern int drm_mm_init(struct drm_mm *mm,
+ unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
@@ -171,10 +230,14 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
- unsigned alignment);
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
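The new color arguments thread through to the optional color_adjust hook, which lets a driver shrink a candidate hole around differently-coloured neighbours. A minimal sketch, assuming page-sized guard gaps (illustrative policy only; i915's cache-colouring use also trims the end of the hole):

    static void example_color_adjust(struct drm_mm_node *node,
                                     unsigned long color,
                                     unsigned long *start, unsigned long *end)
    {
        /* Push the hole start away from a differently-coloured
         * predecessor; a full version would also trim *end against
         * the successor's colour. */
        if (node->color != color)
            *start += PAGE_SIZE;
    }

    /* Installed once after drm_mm_init(mm, start, size): */
    mm->color_adjust = example_color_adjust;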
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index a7aec391b7b..7ff5c99b163 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -686,14 +686,6 @@
{0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
-#define i830_PCI_IDS \
- {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
#define gamma_PCI_IDS \
{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
@@ -726,37 +718,3 @@
#define ffb_PCI_IDS \
{0, 0, 0}
-
-#define i915_PCI_IDS \
- {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0, 0, 0}
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index f3f82242bf1..8cc70837f92 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -200,6 +200,9 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -243,6 +246,9 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -298,6 +304,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
typedef struct drm_i915_getparam {
int param;
@@ -656,13 +663,19 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
- __u64 rsvd1;
+ __u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
+
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
@@ -886,4 +899,23 @@ struct drm_intel_sprite_colorkey {
__u32 flags;
};
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait; updated on return with the time remaining. */
+ __s64 timeout_ns;
+};
+
+struct drm_i915_gem_context_create {
+ /* output: id of new context */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
#endif /* _I915_DRM_H_ */
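From userspace the new ioctls compose as below; a hedged sketch using libdrm's drmIoctl(), with error handling elided and batch_handle hypothetical:

    struct drm_i915_gem_context_create create = { 0 };
    drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);

    /* Tag an execbuffer with the context via the repurposed rsvd1. */
    struct drm_i915_gem_execbuffer2 execbuf = { 0 };
    i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
    /* ... fill in buffers and batch, then DRM_IOCTL_I915_GEM_EXECBUFFER2 ... */

    /* Wait up to 1 ms on a BO; timeout_ns is rewritten with the time left. */
    struct drm_i915_gem_wait wait = {
        .bo_handle = batch_handle,
        .timeout_ns = 1000000,
    };
    drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);

    struct drm_i915_gem_context_destroy destroy = { .ctx_id = create.ctx_id };
    drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);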
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 923afb5dcf0..8e29d551bb3 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -19,8 +19,16 @@ const struct intel_gtt {
dma_addr_t scratch_page_dma;
/* for ppgtt PDE access */
u32 __iomem *gtt;
+ /* needed for ioremap in drm/i915 */
+ phys_addr_t gma_bus_addr;
} *intel_gtt_get(void);
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(void);
+
+bool intel_enable_gtt(void);
+
void intel_gtt_chipset_flush(void);
void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
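These entry points replace the /dev/agpgart path for GTT setup. A sketch of the expected call order, assuming bridge_pdev and gpu_pdev are the host-bridge and GPU PCI devices (the sequence is an assumption modelled on the driver side of this series):

    /* Driver load: a NULL bridge means no AGP backend is registered. */
    if (!intel_gmch_probe(bridge_pdev, gpu_pdev, NULL))
        return -EIO;
    if (!intel_enable_gtt())
        return -EIO;

    /* Driver unload: */
    intel_gmch_remove();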
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 5edd3a76fff..2a5769fdf8b 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -25,70 +25,6 @@
#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__
-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
-
-struct drm_nouveau_channel_alloc {
- uint32_t fb_ctxdma_handle;
- uint32_t tt_ctxdma_handle;
-
- int channel;
- uint32_t pushbuf_domains;
-
- /* Notifier memory */
- uint32_t notifier_handle;
-
- /* DRM-enforced subchannel assignments */
- struct {
- uint32_t handle;
- uint32_t grclass;
- } subchan[8];
- uint32_t nr_subchan;
-};
-
-struct drm_nouveau_channel_free {
- int channel;
-};
-
-struct drm_nouveau_grobj_alloc {
- int channel;
- uint32_t handle;
- int class;
-};
-
-struct drm_nouveau_notifierobj_alloc {
- uint32_t channel;
- uint32_t handle;
- uint32_t size;
- uint32_t offset;
-};
-
-struct drm_nouveau_gpuobj_free {
- int channel;
- uint32_t handle;
-};
-
-/* FIXME : maybe unify {GET,SET}PARAMs */
-#define NOUVEAU_GETPARAM_PCI_VENDOR 3
-#define NOUVEAU_GETPARAM_PCI_DEVICE 4
-#define NOUVEAU_GETPARAM_BUS_TYPE 5
-#define NOUVEAU_GETPARAM_FB_SIZE 8
-#define NOUVEAU_GETPARAM_AGP_SIZE 9
-#define NOUVEAU_GETPARAM_CHIPSET_ID 11
-#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
-#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
-#define NOUVEAU_GETPARAM_PTIMER_TIME 14
-#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
-#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-struct drm_nouveau_getparam {
- uint64_t param;
- uint64_t value;
-};
-
-struct drm_nouveau_setparam {
- uint64_t param;
- uint64_t value;
-};
-
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
@@ -180,35 +116,19 @@ struct drm_nouveau_gem_cpu_fini {
uint32_t handle;
};
-enum nouveau_bus_type {
- NV_AGP = 0,
- NV_PCI = 1,
- NV_PCIE = 2,
-};
-
-struct drm_nouveau_sarea {
-};
-
-#define DRM_NOUVEAU_GETPARAM 0x00
-#define DRM_NOUVEAU_SETPARAM 0x01
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03
-#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
-#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
-#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
+#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
-#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
-#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
-#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
-#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
-#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 035b804dda6..df3763222d7 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -51,17 +51,17 @@
typedef struct {
int context;
- unsigned int offset;
- unsigned int size;
+ unsigned long offset;
+ unsigned long size;
unsigned long free;
} drm_sis_mem_t;
typedef struct {
- unsigned int offset, size;
+ unsigned long offset, size;
} drm_sis_agp_t;
typedef struct {
- unsigned int offset, size;
+ unsigned long offset, size;
} drm_sis_fb_t;
struct sis_file_private {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index a05f1b55714..084e8989a6e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -39,8 +39,6 @@
#include "linux/fs.h"
#include "linux/spinlock.h"
-struct ttm_backend;
-
struct ttm_backend_func {
/**
* struct ttm_backend_func member bind
@@ -119,7 +117,6 @@ struct ttm_tt {
unsigned long num_pages;
struct sg_table *sg; /* for SG objects via dma-buf */
struct ttm_bo_global *glob;
- struct ttm_backend *be;
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 53274bff577..7fb75b14375 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -543,6 +543,11 @@
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Supported Link Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
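These Link Capabilities 2 bits are what drm_pcie_get_speed_cap_mask() translates into DRM_PCIE_SPEED_* flags. A sketch of the raw register read, assuming pdev is a PCIe device:

    u32 lnkcap2;
    bool supports_gen3 = false;
    int pos = pci_pcie_cap(pdev);	/* PCIe capability offset */

    pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP2, &lnkcap2);
    if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
        supports_gen3 = true;	/* 8.0 GT/s signalling supported */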