Diffstat (limited to 'drivers/gpu/drm/radeon/rs600.c')
| -rw-r--r-- | drivers/gpu/drm/radeon/rs600.c | 275 |
1 file changed, 179 insertions, 96 deletions
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index e95c5e61d4e..d1a35cb1c91 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -35,7 +35,7 @@
  * close to the one of the R600 family (R600 likely being an evolution
  * of the RS600 GART block).
  */
-#include "drmP.h"
+#include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "atom.h"
@@ -43,41 +43,73 @@
 
 #include "rs600_reg_safe.h"
 
-void rs600_gpu_init(struct radeon_device *rdev);
+static void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
-void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+static const u32 crtc_offsets[2] =
 {
-        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
-        int i;
+        0,
+        AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
 
-        if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
-                for (i = 0; i < rdev->usec_timeout; i++) {
-                        if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
-                                break;
-                        udelay(1);
-                }
-                for (i = 0; i < rdev->usec_timeout; i++) {
-                        if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
-                                break;
-                        udelay(1);
-                }
-        }
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+        if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+                return true;
+        else
+                return false;
 }
 
-void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
 {
-        /* enable the pflip int */
-        radeon_irq_kms_pflip_irq_get(rdev, crtc);
+        u32 pos1, pos2;
+
+        pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+        pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+        if (pos1 != pos2)
+                return true;
+        else
+                return false;
 }
 
-void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-        /* disable the pflip int */
-        radeon_irq_kms_pflip_irq_put(rdev, crtc);
+        unsigned i = 0;
+
+        if (crtc >= rdev->num_crtc)
+                return;
+
+        if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+                return;
+
+        /* depending on when we hit vblank, we may be close to active; if so,
+         * wait for another frame.
+         */
+        while (avivo_is_in_vblank(rdev, crtc)) {
+                if (i++ % 100 == 0) {
+                        if (!avivo_is_counter_moving(rdev, crtc))
+                                break;
+                }
+        }
+
+        while (!avivo_is_in_vblank(rdev, crtc)) {
+                if (i++ % 100 == 0) {
+                        if (!avivo_is_counter_moving(rdev, crtc))
+                                break;
+                }
+        }
 }
 
-u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
         struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
         u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -104,9 +136,79 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
         /* Unlock the lock, so double-buffering can take place inside vblank */
         tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
         WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
         /* Return current update_pending status: */
-        return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+        return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
+                AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
+}
+
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+        struct drm_device *dev = encoder->dev;
+        struct radeon_device *rdev = dev->dev_private;
+        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+        int bpc = 0;
+        u32 tmp = 0;
+        enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+        if (connector) {
+                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                bpc = radeon_get_monitor_bpc(connector);
+                dither = radeon_connector->dither;
+        }
+
+        /* LVDS FMT is set up by atom */
+        if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+                return;
+
+        if (bpc == 0)
+                return;
+
+        switch (bpc) {
+        case 6:
+                if (dither == RADEON_FMT_DITHER_ENABLE)
+                        /* XXX sort out optimal dither settings */
+                        tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+                else
+                        tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+                break;
+        case 8:
+                if (dither == RADEON_FMT_DITHER_ENABLE)
+                        /* XXX sort out optimal dither settings */
+                        tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+                                AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+                else
+                        tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+                                AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+                break;
+        case 10:
+        default:
+                /* not needed */
+                break;
+        }
+
+        switch (radeon_encoder->encoder_id) {
+        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+                WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+                break;
+        case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+                WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+                break;
+        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+                WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+                break;
+        case ENCODER_OBJECT_ID_INTERNAL_DDI:
+                WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+                break;
+        default:
+                break;
+        }
 }
 
 void rs600_pm_misc(struct radeon_device *rdev)
@@ -294,6 +396,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
 {
         struct drm_device *dev = rdev->ddev;
         struct drm_connector *connector;
+        unsigned enable = 0;
 
         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -301,26 +404,25 @@ void rs600_hpd_init(struct radeon_device *rdev)
                 case RADEON_HPD_1:
                         WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
                                S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
-                        rdev->irq.hpd[0] = true;
                         break;
                 case RADEON_HPD_2:
                         WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
                                S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
-                        rdev->irq.hpd[1] = true;
                         break;
                 default:
                         break;
                 }
+                enable |= 1 << radeon_connector->hpd.hpd;
                 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
         }
-        if (rdev->irq.installed)
-                rs600_irq_set(rdev);
+        radeon_irq_kms_enable_hpd(rdev, enable);
 }
 
 void rs600_hpd_fini(struct radeon_device *rdev)
 {
         struct drm_device *dev = rdev->ddev;
         struct drm_connector *connector;
+        unsigned disable = 0;
 
         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -328,17 +430,17 @@ void rs600_hpd_fini(struct radeon_device *rdev)
                 case RADEON_HPD_1:
                         WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
                                S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
-                        rdev->irq.hpd[0] = false;
                         break;
                 case RADEON_HPD_2:
                         WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
                                S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
-                        rdev->irq.hpd[1] = false;
                         break;
                 default:
                         break;
                 }
+                disable |= 1 << radeon_connector->hpd.hpd;
         }
+        radeon_irq_kms_disable_hpd(rdev, disable);
 }
 
 int rs600_asic_reset(struct radeon_device *rdev)
@@ -424,7 +526,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
         tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 }
 
-int rs600_gart_init(struct radeon_device *rdev)
+static int rs600_gart_init(struct radeon_device *rdev)
 {
         int r;
 
@@ -506,7 +608,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
         return 0;
 }
 
-void rs600_gart_disable(struct radeon_device *rdev)
+static void rs600_gart_disable(struct radeon_device *rdev)
 {
         u32 tmp;
 
@@ -517,31 +619,23 @@ void rs600_gart_disable(struct radeon_device *rdev)
         radeon_gart_table_vram_unpin(rdev);
 }
 
-void rs600_gart_fini(struct radeon_device *rdev)
+static void rs600_gart_fini(struct radeon_device *rdev)
 {
         radeon_gart_fini(rdev);
         rs600_gart_disable(rdev);
         radeon_gart_table_vram_free(rdev);
 }
 
-#define R600_PTE_VALID     (1 << 0)
-#define R600_PTE_SYSTEM    (1 << 1)
-#define R600_PTE_SNOOPED   (1 << 2)
-#define R600_PTE_READABLE  (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
 {
         void __iomem *ptr = (void *)rdev->gart.ptr;
 
-        if (i < 0 || i > rdev->gart.num_gpu_pages) {
-                return -EINVAL;
-        }
         addr = addr & 0xFFFFFFFFFFFFF000ULL;
-        addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
-        addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+        if (addr == rdev->dummy_page.addr)
+                addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+        else
+                addr |= R600_PTE_GART;
         writeq(addr, ptr + (i * 8));
-        return 0;
 }
 
 int rs600_irq_set(struct radeon_device *rdev)
@@ -564,18 +658,15 @@ int rs600_irq_set(struct radeon_device *rdev)
                 WREG32(R_000040_GEN_INT_CNTL, 0);
                 return -EINVAL;
         }
-        if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+        if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
                 tmp |= S_000040_SW_INT_EN(1);
         }
-        if (rdev->irq.gui_idle) {
-                tmp |= S_000040_GUI_IDLE(1);
-        }
         if (rdev->irq.crtc_vblank_int[0] ||
-            rdev->irq.pflip[0]) {
+            atomic_read(&rdev->irq.pflip[0])) {
                 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
         }
         if (rdev->irq.crtc_vblank_int[1] ||
-            rdev->irq.pflip[1]) {
+            atomic_read(&rdev->irq.pflip[1])) {
                 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
         }
         if (rdev->irq.hpd[0]) {
@@ -602,12 +693,6 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
         uint32_t irq_mask = S_000044_SW_INT(1);
         u32 tmp;
 
-        /* the interrupt works, but the status bit is permanently asserted */
-        if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
-                if (!rdev->irq.gui_idle_acked)
-                        irq_mask |= S_000044_GUI_IDLE_STAT(1);
-        }
-
         if (G_000044_DISPLAY_INT_STAT(irqs)) {
                 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
                 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -667,9 +752,6 @@ int rs600_irq_process(struct radeon_device *rdev)
         bool queue_hotplug = false;
         bool queue_hdmi = false;
 
-        /* reset gui idle ack.  the status bit is broken */
-        rdev->irq.gui_idle_acked = false;
-
         status = rs600_irq_ack(rdev);
         if (!status &&
             !rdev->irq.stat_regs.r500.disp_int &&
@@ -683,12 +765,6 @@ int rs600_irq_process(struct radeon_device *rdev)
                 if (G_000044_SW_INT(status)) {
                         radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                 }
-                /* GUI idle */
-                if (G_000040_GUI_IDLE(status)) {
-                        rdev->irq.gui_idle_acked = true;
-                        rdev->pm.gui_idle = true;
-                        wake_up(&rdev->irq.idle_queue);
-                }
                 /* Vertical blank interrupts */
                 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                         if (rdev->irq.crtc_vblank_int[0]) {
@@ -696,8 +772,8 @@ int rs600_irq_process(struct radeon_device *rdev)
                                 rdev->pm.vblank_sync = true;
                                 wake_up(&rdev->irq.vblank_queue);
                         }
-                        if (rdev->irq.pflip[0])
-                                radeon_crtc_handle_flip(rdev, 0);
+                        if (atomic_read(&rdev->irq.pflip[0]))
+                                radeon_crtc_handle_vblank(rdev, 0);
                 }
                 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                         if (rdev->irq.crtc_vblank_int[1]) {
@@ -705,8 +781,8 @@ int rs600_irq_process(struct radeon_device *rdev)
                                 rdev->pm.vblank_sync = true;
                                 wake_up(&rdev->irq.vblank_queue);
                         }
-                        if (rdev->irq.pflip[1])
-                                radeon_crtc_handle_flip(rdev, 1);
+                        if (atomic_read(&rdev->irq.pflip[1]))
+                                radeon_crtc_handle_vblank(rdev, 1);
                 }
                 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
                         queue_hotplug = true;
@@ -722,8 +798,6 @@ int rs600_irq_process(struct radeon_device *rdev)
                 }
                 status = rs600_irq_ack(rdev);
         }
-        /* reset gui idle ack.  the status bit is broken */
-        rdev->irq.gui_idle_acked = false;
         if (queue_hotplug)
                 schedule_work(&rdev->hotplug_work);
         if (queue_hdmi)
@@ -765,7 +839,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
         return -1;
 }
 
-void rs600_gpu_init(struct radeon_device *rdev)
+static void rs600_gpu_init(struct radeon_device *rdev)
 {
         r420_pipes_init(rdev);
         /* Wait for mc idle */
@@ -773,7 +847,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
                 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
 }
 
-void rs600_mc_init(struct radeon_device *rdev)
+static void rs600_mc_init(struct radeon_device *rdev)
 {
         u64 base;
 
@@ -823,19 +897,29 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
 
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+        unsigned long flags;
+        u32 r;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
                 S_000070_MC_IND_CITF_ARB0(1));
-        return RREG32(R_000074_MC_IND_DATA);
+        r = RREG32(R_000074_MC_IND_DATA);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+        return r;
 }
 
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+        unsigned long flags;
+
+        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
         WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
                 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
         WREG32(R_000074_MC_IND_DATA, v);
+        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
-void rs600_debugfs(struct radeon_device *rdev)
+static void rs600_debugfs(struct radeon_device *rdev)
 {
         if (r100_debugfs_rbbm_init(rdev))
                 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
@@ -899,6 +983,12 @@ static int rs600_startup(struct radeon_device *rdev)
         }
 
         /* Enable IRQ */
+        if (!rdev->irq.installed) {
+                r = radeon_irq_kms_init(rdev);
+                if (r)
+                        return r;
+        }
+
         rs600_irq_set(rdev);
         rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
         /* 1M ring buffer */
@@ -908,13 +998,11 @@ static int rs600_startup(struct radeon_device *rdev)
                 return r;
         }
 
-        r = radeon_ib_pool_start(rdev);
-        if (r)
-                return r;
-
-        r = radeon_ib_ring_tests(rdev);
-        if (r)
+        r = radeon_ib_pool_init(rdev);
+        if (r) {
+                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                 return r;
+        }
 
         r = r600_audio_init(rdev);
         if (r) {
@@ -956,7 +1044,7 @@ int rs600_resume(struct radeon_device *rdev)
 
 int rs600_suspend(struct radeon_device *rdev)
 {
-        radeon_ib_pool_suspend(rdev);
+        radeon_pm_suspend(rdev);
         r600_audio_fini(rdev);
         r100_cp_disable(rdev);
         radeon_wb_disable(rdev);
@@ -967,10 +1055,11 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
+        radeon_pm_fini(rdev);
         r600_audio_fini(rdev);
         r100_cp_fini(rdev);
         radeon_wb_fini(rdev);
-        r100_ib_fini(rdev);
+        radeon_ib_pool_fini(rdev);
         radeon_gem_fini(rdev);
         rs600_gart_fini(rdev);
         radeon_irq_kms_fini(rdev);
@@ -1026,9 +1115,6 @@ int rs600_init(struct radeon_device *rdev)
         r = radeon_fence_driver_init(rdev);
         if (r)
                 return r;
-        r = radeon_irq_kms_init(rdev);
-        if (r)
-                return r;
         /* Memory manager */
         r = radeon_bo_init(rdev);
         if (r)
@@ -1038,20 +1124,17 @@ int rs600_init(struct radeon_device *rdev)
                 return r;
         rs600_set_safe_registers(rdev);
 
-        r = radeon_ib_pool_init(rdev);
-        rdev->accel_working = true;
-        if (r) {
-                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-                rdev->accel_working = false;
-        }
+        /* Initialize power management */
+        radeon_pm_init(rdev);
+        rdev->accel_working = true;
 
         r = rs600_startup(rdev);
         if (r) {
                 /* Somethings want wront with the accel init stop accel */
                 dev_err(rdev->dev, "Disabling GPU acceleration\n");
                 r100_cp_fini(rdev);
                 radeon_wb_fini(rdev);
-                r100_ib_fini(rdev);
+                radeon_ib_pool_fini(rdev);
                 rs600_gart_fini(rdev);
                 radeon_irq_kms_fini(rdev);
                 rdev->accel_working = false;
