Diffstat (limited to 'drivers/gpu/drm/radeon/rs600.c')
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 458
1 file changed, 328 insertions(+), 130 deletions(-)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c2e6..d1a35cb1c91 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -35,7 +35,7 @@
* close to the one of the R600 family (R600 likely being an evolution
* of the RS600 GART block).
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
@@ -43,9 +43,174 @@
#include "rs600_reg_safe.h"
-void rs600_gpu_init(struct radeon_device *rdev);
+static void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ unsigned i = 0;
+
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+}
+
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+ /* Return current update_pending status: */
+ return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
+}
+
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+ else
+ tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+ AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+ AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
@@ -75,7 +240,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
udelay(voltage->delay);
}
} else if (voltage->type == VOLTAGE_VDDC)
- radeon_atom_set_voltage(rdev, voltage->vddc_id);
+ radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
@@ -131,7 +296,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
- rdev->asic->set_pcie_lanes &&
+ rdev->asic->pm.set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
@@ -231,6 +396,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -238,25 +404,25 @@ void rs600_hpd_init(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
- rdev->irq.hpd[1] = true;
break;
default:
break;
}
+ enable |= 1 << radeon_connector->hpd.hpd;
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- rs600_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -264,41 +430,31 @@ void rs600_hpd_fini(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
- rdev->irq.hpd[1] = false;
break;
default:
break;
}
+ disable |= 1 << radeon_connector->hpd.hpd;
}
-}
-
-void rs600_bm_disable(struct radeon_device *rdev)
-{
- u32 tmp;
-
- /* disable bus mastering */
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
- mdelay(1);
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
int rs600_asic_reset(struct radeon_device *rdev)
{
- u32 status, tmp;
-
struct rv515_mc_save save;
+ u32 status, tmp;
+ int ret = 0;
- /* Stops all mc clients */
- rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
@@ -310,7 +466,8 @@ int rs600_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_CNTL, tmp);
pci_save_state(rdev->pdev);
/* disable bus mastering */
- rs600_bm_disable(rdev);
+ pci_clear_master(rdev->pdev);
+ mdelay(1);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
@@ -341,12 +498,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
- return -1;
- }
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
rv515_mc_resume(rdev, &save);
- dev_info(rdev->dev, "GPU reset succeed\n");
- return 0;
+ return ret;
}
/*
@@ -370,11 +526,11 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
-int rs600_gart_init(struct radeon_device *rdev)
+static int rs600_gart_init(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.table.vram.robj) {
+ if (rdev->gart.robj) {
WARN(1, "RS600 GART already initialized\n");
return 0;
}
@@ -387,12 +543,12 @@ int rs600_gart_init(struct radeon_device *rdev)
return radeon_gart_table_vram_alloc(rdev);
}
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
- if (rdev->gart.table.vram.robj == NULL) {
+ if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
@@ -401,8 +557,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
- tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
- WREG32(R_00004C_BUS_CNTL, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
(S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
@@ -445,54 +601,41 @@ int rs600_gart_enable(struct radeon_device *rdev)
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
rs600_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
-void rs600_gart_disable(struct radeon_device *rdev)
+static void rs600_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
- if (rdev->gart.table.vram.robj) {
- r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
- if (r == 0) {
- radeon_bo_kunmap(rdev->gart.table.vram.robj);
- radeon_bo_unpin(rdev->gart.table.vram.robj);
- radeon_bo_unreserve(rdev->gart.table.vram.robj);
- }
- }
+ radeon_gart_table_vram_unpin(rdev);
}
-void rs600_gart_fini(struct radeon_device *rdev)
+static void rs600_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
}
-#define R600_PTE_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ void __iomem *ptr = (void *)rdev->gart.ptr;
- if (i < 0 || i > rdev->gart.num_gpu_pages) {
- return -EINVAL;
- }
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
- writeq(addr, ((void __iomem *)ptr) + (i * 8));
- return 0;
+ if (addr == rdev->dummy_page.addr)
+ addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ else
+ addr |= R600_PTE_GART;
+ writeq(addr, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)
@@ -503,22 +646,27 @@ int rs600_irq_set(struct radeon_device *rdev)
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ u32 hdmi0;
+ if (ASIC_IS_DCE2(rdev))
+ hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ else
+ hdmi0 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= S_000040_SW_INT_EN(1);
}
- if (rdev->irq.gui_idle) {
- tmp |= S_000040_GUI_IDLE(1);
- }
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ atomic_read(&rdev->irq.pflip[0])) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ atomic_read(&rdev->irq.pflip[1])) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
@@ -527,49 +675,59 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
+ if (rdev->irq.afmt[0]) {
+ hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ if (ASIC_IS_DCE2(rdev))
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
return 0;
}
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
- /* the interrupt works, but the status bit is permanently asserted */
- if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
- if (!rdev->irq.gui_idle_acked)
- irq_mask |= S_000044_GUI_IDLE_STAT(1);
- }
-
if (G_000044_DISPLAY_INT_STAT(irqs)) {
- *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+ rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006534_D1MODE_VBLANK_STATUS,
S_006534_D1MODE_VBLANK_ACK(1));
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
} else {
- *r500_disp_int = 0;
+ rdev->irq.stat_regs.r500.disp_int = 0;
}
+ if (ASIC_IS_DCE2(rdev)) {
+ rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+ S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ } else
+ rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
if (irqs) {
WREG32(R_000044_GEN_INT_STATUS, irqs);
}
@@ -578,63 +736,72 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
void rs600_irq_disable(struct radeon_device *rdev)
{
- u32 tmp;
-
+ u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
mdelay(1);
- rs600_irq_ack(rdev, &tmp);
+ rs600_irq_ack(rdev);
}
int rs600_irq_process(struct radeon_device *rdev)
{
- uint32_t status, msi_rearm;
- uint32_t r500_disp_int;
+ u32 status, msi_rearm;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
- /* reset gui idle ack. the status bit is broken */
- rdev->irq.gui_idle_acked = false;
-
- status = rs600_irq_ack(rdev, &r500_disp_int);
- if (!status && !r500_disp_int) {
+ status = rs600_irq_ack(rdev);
+ if (!status &&
+ !rdev->irq.stat_regs.r500.disp_int &&
+ !rdev->irq.stat_regs.r500.hdmi0_status) {
return IRQ_NONE;
}
- while (status || r500_disp_int) {
+ while (status ||
+ rdev->irq.stat_regs.r500.disp_int ||
+ rdev->irq.stat_regs.r500.hdmi0_status) {
/* SW interrupt */
- if (G_000044_SW_INT(status))
- radeon_fence_process(rdev);
- /* GUI idle */
- if (G_000040_GUI_IDLE(status)) {
- rdev->irq.gui_idle_acked = true;
- rdev->pm.gui_idle = true;
- wake_up(&rdev->irq.idle_queue);
+ if (G_000044_SW_INT(status)) {
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_vblank(rdev, 0);
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_vblank(rdev, 1);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
- status = rs600_irq_ack(rdev, &r500_disp_int);
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ queue_hdmi = true;
+ DRM_DEBUG("HDMI0\n");
+ }
+ status = rs600_irq_ack(rdev);
}
- /* reset gui idle ack. the status bit is broken */
- rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -645,9 +812,7 @@ int rs600_irq_process(struct radeon_device *rdev)
WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
break;
default:
- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
- WREG32(RADEON_MSI_REARM_EN, msi_rearm);
- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
break;
}
}
@@ -674,7 +839,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void rs600_gpu_init(struct radeon_device *rdev)
+static void rs600_gpu_init(struct radeon_device *rdev)
{
r420_pipes_init(rdev);
/* Wait for mc idle */
@@ -682,7 +847,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
-void rs600_mc_init(struct radeon_device *rdev)
+static void rs600_mc_init(struct radeon_device *rdev)
{
u64 base;
@@ -693,7 +858,6 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
@@ -733,19 +897,29 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1));
- return RREG32(R_000074_MC_IND_DATA);
+ r = RREG32(R_000074_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+ return r;
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
-void rs600_debugfs(struct radeon_device *rdev)
+static void rs600_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev))
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
@@ -802,18 +976,31 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
- dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_init(rdev);
if (r) {
- dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
@@ -828,6 +1015,8 @@ static int rs600_startup(struct radeon_device *rdev)
int rs600_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sur GART are not working */
rs600_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -844,11 +1033,18 @@ int rs600_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return rs600_startup(rdev);
+
+ rdev->accel_working = true;
+ r = rs600_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int rs600_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -859,10 +1055,11 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -918,9 +1115,6 @@ int rs600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
@@ -929,6 +1123,10 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
@@ -936,7 +1134,7 @@ int rs600_init(struct radeon_device *rdev)
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;