Diffstat (limited to 'drivers/gpu/drm/radeon/si.c')
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 1234
1 file changed, 650 insertions, 584 deletions
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index d325280e2f9..9e854fd016d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -39,35 +39,43 @@ MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); MODULE_FIRMWARE("radeon/TAHITI_me.bin"); MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); +MODULE_FIRMWARE("radeon/TAHITI_mc2.bin"); MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); MODULE_FIRMWARE("radeon/TAHITI_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); +MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin"); MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); MODULE_FIRMWARE("radeon/VERDE_ce.bin"); MODULE_FIRMWARE("radeon/VERDE_mc.bin"); +MODULE_FIRMWARE("radeon/VERDE_mc2.bin"); MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); MODULE_FIRMWARE("radeon/VERDE_smc.bin"); MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); +MODULE_FIRMWARE("radeon/OLAND_mc2.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); MODULE_FIRMWARE("radeon/OLAND_smc.bin"); MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); MODULE_FIRMWARE("radeon/HAINAN_me.bin"); MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_mc2.bin"); MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); +static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); static void si_program_aspm(struct radeon_device *rdev); +extern void sumo_rlc_fini(struct radeon_device *rdev); +extern int sumo_rlc_init(struct radeon_device *rdev); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); @@ -76,6 +84,13 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev); extern bool evergreen_is_display_hung(struct radeon_device *rdev); +static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, + bool enable); +static void si_init_pg(struct radeon_device *rdev); +static void si_init_cg(struct radeon_device *rdev); +static void si_fini_pg(struct radeon_device *rdev); +static void si_fini_cg(struct radeon_device *rdev); +static void si_rlc_stop(struct radeon_device *rdev); static const u32 verde_rlc_save_restore_register_list[] = { @@ -1453,41 +1468,38 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { }; /* ucode loading */ -static int si_mc_load_microcode(struct radeon_device *rdev) +int si_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; u32 running, blackout = 0; u32 *io_mc_regs; - int i, ucode_size, regs_size; + int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; + ucode_size = rdev->mc_fw->size / 4; + switch (rdev->family) { case CHIP_TAHITI: io_mc_regs = (u32 *)&tahiti_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_PITCAIRN: io_mc_regs = (u32 *)&pitcairn_io_mc_regs; - 
ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_VERDE: default: io_mc_regs = (u32 *)&verde_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_OLAND: io_mc_regs = (u32 *)&oland_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_HAINAN: io_mc_regs = (u32 *)&hainan_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; } @@ -1543,7 +1555,7 @@ static int si_init_microcode(struct radeon_device *rdev) const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; - size_t smc_req_size; + size_t smc_req_size, mc2_req_size; char fw_name[30]; int err; @@ -1558,6 +1570,7 @@ static int si_init_microcode(struct radeon_device *rdev) ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = TAHITI_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4); break; case CHIP_PITCAIRN: @@ -1568,6 +1581,7 @@ static int si_init_microcode(struct radeon_device *rdev) ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4); break; case CHIP_VERDE: @@ -1578,6 +1592,7 @@ static int si_init_microcode(struct radeon_device *rdev) ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = VERDE_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4); break; case CHIP_OLAND: @@ -1587,7 +1602,7 @@ static int si_init_microcode(struct radeon_device *rdev) me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4); break; case CHIP_HAINAN: @@ -1597,7 +1612,7 @@ static int si_init_microcode(struct radeon_device *rdev) me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4); break; default: BUG(); @@ -1650,22 +1665,33 @@ static int si_init_microcode(struct radeon_device *rdev) err = -EINVAL; } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->mc_fw->size != mc_req_size) { + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)) { printk(KERN_ERR "si_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); err = -EINVAL; } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + err = 0; + } else if 
(rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "si_smc: Bogus length %zu in firmware \"%s\"\n", rdev->smc_fw->size, fw_name); @@ -1700,7 +1726,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp; + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -1715,16 +1742,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, * non-linked crtcs for maximum line buffer allocation. */ if (radeon_crtc->base.enabled && mode) { - if (other_mode) + if (other_mode) { tmp = 0; /* 1/2 */ - else + buffer_alloc = 1; + } else { tmp = 2; /* whole */ - } else + buffer_alloc = 2; + } + } else { tmp = 0; + buffer_alloc = 0; + } WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, DC_LB_MEMORY_CONFIG(tmp)); + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: @@ -2784,7 +2825,7 @@ static void si_setup_spi(struct radeon_device *rdev, } static u32 si_get_rb_disabled(struct radeon_device *rdev, - u32 max_rb_num, u32 se_num, + u32 max_rb_num_per_se, u32 sh_per_se) { u32 data, mask; @@ -2798,14 +2839,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev, data >>= BACKEND_DISABLE_SHIFT; - mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); + mask = si_create_bitmask(max_rb_num_per_se / sh_per_se); return data & mask; } static void si_setup_rb(struct radeon_device *rdev, u32 se_num, u32 sh_per_se, - u32 max_rb_num) + u32 max_rb_num_per_se) { int i, j; u32 data, mask; @@ -2815,19 +2856,21 @@ static void si_setup_rb(struct radeon_device *rdev, for (i = 0; i < se_num; i++) { for (j = 0; j < sh_per_se; j++) { si_select_se_sh(rdev, i, j); - data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); + data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se); disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); } } si_select_se_sh(rdev, 0xffffffff, 0xffffffff); mask = 1; - for (i = 0; i < max_rb_num; i++) { + for (i = 0; i < max_rb_num_per_se * se_num; i++) { if (!(disabled_rbs & mask)) enabled_rbs |= mask; mask <<= 1; } + rdev->config.si.backend_enable_mask = enabled_rbs; + for (i = 0; i < se_num; i++) { si_select_se_sh(rdev, i, 0xffffffff); data = 0; @@ -2858,7 +2901,7 @@ static void si_gpu_init(struct radeon_device *rdev) u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; - int i, j; + int i, j, k; switch (rdev->family) { case CHIP_TAHITI: @@ -3056,6 +3099,14 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.max_sh_per_se, rdev->config.si.max_cu_per_sh); + for (i = 0; i < rdev->config.si.max_shader_engines; i++) { + for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { + for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { + rdev->config.si.active_cus += + hweight32(si_get_cu_active_bitmap(rdev, i, j)); + } + } + } /* set HW defaults for 3D engine */ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | @@ -3144,7 +3195,7 @@ void si_fence_ring_emit(struct radeon_device *rdev, /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 
EVENT_INDEX(5)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); @@ -3177,7 +3228,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, (1 << 8)); radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); radeon_ring_write(ring, next_rptr); } @@ -3218,7 +3269,8 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable) if (enable) WREG32(CP_ME_CNTL, 0); else { - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); WREG32(SCRATCH_UMSK, 0); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; @@ -3360,16 +3412,7 @@ static int si_cp_resume(struct radeon_device *rdev) u32 rb_bufsz; int r; - /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ - WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | - SOFT_RESET_PA | - SOFT_RESET_VGT | - SOFT_RESET_SPI | - SOFT_RESET_SX)); - RREG32(GRBM_SOFT_RESET); - mdelay(15); - WREG32(GRBM_SOFT_RESET, 0); - RREG32(GRBM_SOFT_RESET); + si_enable_gui_idle_interrupt(rdev, false); WREG32(CP_SEM_WAIT_TIMER, 0x0); WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); @@ -3383,8 +3426,8 @@ static int si_cp_resume(struct radeon_device *rdev) /* ring 0 - compute and gfx */ /* Set ring buffer size */ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3411,13 +3454,11 @@ static int si_cp_resume(struct radeon_device *rdev) WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB0_RPTR); - /* ring1 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3437,13 +3478,11 @@ static int si_cp_resume(struct radeon_device *rdev) WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB1_RPTR); - /* ring2 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -3463,8 +3502,6 @@ static int si_cp_resume(struct radeon_device *rdev) WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB2_RPTR); - /* start the rings */ si_cp_start(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; @@ -3486,10 +3523,15 @@ static int si_cp_resume(struct radeon_device *rdev) rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = 
false; } + si_enable_gui_idle_interrupt(rdev, true); + + if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + return 0; } -static u32 si_gpu_check_soft_reset(struct radeon_device *rdev) +u32 si_gpu_check_soft_reset(struct radeon_device *rdev) { u32 reset_mask = 0; u32 tmp; @@ -3587,6 +3629,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); + /* disable PG/CG */ + si_fini_pg(rdev); + si_fini_cg(rdev); + + /* stop the rlc */ + si_rlc_stop(rdev); + /* Disable CP parsing/prefetching */ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); @@ -3695,6 +3744,106 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) evergreen_print_gpu_status_regs(rdev); } +static void si_set_clk_bypass_mode(struct radeon_device *rdev) +{ + u32 tmp, i; + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_BYPASS_EN; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp |= SPLL_CTLREQ_CHG; + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) + break; + udelay(1); + } + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + tmp = RREG32(MPLL_CNTL_MODE); + tmp &= ~MPLL_MCLK_SEL; + WREG32(MPLL_CNTL_MODE, tmp); +} + +static void si_spll_powerdown(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = RREG32(SPLL_CNTL_MODE); + tmp |= SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_RESET; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_SLEEP; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(SPLL_CNTL_MODE); + tmp &= ~SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); +} + +static void si_gpu_pci_config_reset(struct radeon_device *rdev) +{ + struct evergreen_mc_save save; + u32 tmp, i; + + dev_info(rdev->dev, "GPU pci config reset\n"); + + /* disable dpm? */ + + /* disable cg/pg */ + si_fini_pg(rdev); + si_fini_cg(rdev); + + /* Disable CP parsing/prefetching */ + WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); + /* dma0 */ + tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); + /* dma1 */ + tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); + /* XXX other engines? 
*/ + + /* halt the rlc, disable cp internal ints */ + si_rlc_stop(rdev); + + udelay(50); + + /* disable mem access */ + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); + } + + /* set mclk/sclk to bypass */ + si_set_clk_bypass_mode(rdev); + /* powerdown spll */ + si_spll_powerdown(rdev); + /* disable BM */ + pci_clear_master(rdev->pdev); + /* reset */ + radeon_pci_config_reset(rdev); + /* wait for asic to come out of reset */ + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) + break; + udelay(1); + } +} + int si_asic_reset(struct radeon_device *rdev) { u32 reset_mask; @@ -3704,10 +3853,17 @@ int si_asic_reset(struct radeon_device *rdev) if (reset_mask) r600_set_bios_scratch_engine_hung(rdev, true); + /* try soft reset */ si_gpu_soft_reset(rdev, reset_mask); reset_mask = si_gpu_check_soft_reset(rdev); + /* try pci config reset */ + if (reset_mask && radeon_hard_reset) + si_gpu_pci_config_reset(rdev); + + reset_mask = si_gpu_check_soft_reset(rdev); + if (!reset_mask) r600_set_bios_scratch_engine_hung(rdev, false); @@ -3730,39 +3886,9 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} - -/** - * si_dma_is_lockup - Check if the DMA engine is locked up - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Check if the async DMA engine is locked up. - * Returns true if the engine appears to be locked up, false if not. 
- */ -bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 reset_mask = si_gpu_check_soft_reset(rdev); - u32 mask; - - if (ring->idx == R600_RING_TYPE_DMA_INDEX) - mask = RADEON_RESET_DMA; - else - mask = RADEON_RESET_DMA1; - - if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); - return false; - } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -3883,8 +4009,15 @@ static int si_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* size in MB on si */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + tmp = RREG32(CONFIG_MEMSIZE); + /* some boards may have garbage in the upper 16 bits */ + if (tmp & 0xffff0000) { + DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); + if (tmp & 0xffff) + tmp &= 0xffff; + } + rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = rdev->mc.mc_vram_size; rdev->mc.visible_vram_size = rdev->mc.aper_size; si_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -3920,18 +4053,21 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | ENABLE_L1_TLB | + ENABLE_L1_FRAGMENT_PROCESSING | SYSTEM_ACCESS_MODE_NOT_IN_SYS | ENABLE_ADVANCED_DRIVER_MODEL | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | + ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | - L2_CACHE_BIGK_FRAGMENT_SIZE(0)); + BANK_SELECT(4) | + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); @@ -3968,6 +4104,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | @@ -4079,13 +4216,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev, return 0; } +static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx) +{ + u32 start_reg, reg, i; + u32 command = ib[idx + 4]; + u32 info = ib[idx + 1]; + u32 idx_value = ib[idx]; + if (command & PACKET3_CP_DMA_CMD_SAS) { + /* src address space is register */ + if (((info & 0x60000000) >> 29) == 0) { + start_reg = idx_value << 2; + if (command & PACKET3_CP_DMA_CMD_SAIC) { + reg = start_reg; + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad SRC register\n"); + return -EINVAL; + } + } else { + for (i = 0; i < (command & 0x1fffff); i++) { + reg = start_reg + (4 * i); + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad SRC register\n"); + return -EINVAL; + } + } + } + } + } + if (command & PACKET3_CP_DMA_CMD_DAS) { + /* dst address space is register */ + if (((info & 0x00300000) >> 20) == 0) { + start_reg = ib[idx + 2]; + if (command & PACKET3_CP_DMA_CMD_DAIC) { + reg = start_reg; + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP 
DMA Bad DST register\n"); + return -EINVAL; + } + } else { + for (i = 0; i < (command & 0x1fffff); i++) { + reg = start_reg + (4 * i); + if (!si_vm_reg_valid(reg)) { + DRM_ERROR("CP DMA Bad DST register\n"); + return -EINVAL; + } + } + } + } + } + return 0; +} + static int si_vm_packet3_gfx_check(struct radeon_device *rdev, u32 *ib, struct radeon_cs_packet *pkt) { + int r; u32 idx = pkt->idx + 1; u32 idx_value = ib[idx]; u32 start_reg, end_reg, reg, i; - u32 command, info; switch (pkt->opcode) { case PACKET3_NOP: @@ -4186,50 +4374,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev, } break; case PACKET3_CP_DMA: - command = ib[idx + 4]; - info = ib[idx + 1]; - if (command & PACKET3_CP_DMA_CMD_SAS) { - /* src address space is register */ - if (((info & 0x60000000) >> 29) == 0) { - start_reg = idx_value << 2; - if (command & PACKET3_CP_DMA_CMD_SAIC) { - reg = start_reg; - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); - return -EINVAL; - } - } else { - for (i = 0; i < (command & 0x1fffff); i++) { - reg = start_reg + (4 * i); - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); - return -EINVAL; - } - } - } - } - } - if (command & PACKET3_CP_DMA_CMD_DAS) { - /* dst address space is register */ - if (((info & 0x00300000) >> 20) == 0) { - start_reg = ib[idx + 2]; - if (command & PACKET3_CP_DMA_CMD_DAIC) { - reg = start_reg; - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); - return -EINVAL; - } - } else { - for (i = 0; i < (command & 0x1fffff); i++) { - reg = start_reg + (4 * i); - if (!si_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); - return -EINVAL; - } - } - } - } - } + r = si_vm_packet3_cp_dma_check(ib, idx); + if (r) + return r; break; default: DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); @@ -4241,6 +4388,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev, static int si_vm_packet3_compute_check(struct radeon_device *rdev, u32 *ib, struct radeon_cs_packet *pkt) { + int r; u32 idx = pkt->idx + 1; u32 idx_value = ib[idx]; u32 start_reg, reg, i; @@ -4313,6 +4461,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev, return -EINVAL; } break; + case PACKET3_CP_DMA: + r = si_vm_packet3_cp_dma_check(ib, idx); + if (r) + return r; + break; default: DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode); return -EINVAL; @@ -4653,112 +4806,6 @@ static void si_vm_decode_fault(struct radeon_device *rdev, block, mc_id); } -/** - * si_vm_set_page - update the page tables using the CP - * - * @rdev: radeon_device pointer - * @ib: indirect buffer to fill with commands - * @pe: addr of the page entry - * @addr: dst addr to write into pe - * @count: number of page entries to update - * @incr: increase next addr by incr bytes - * @flags: access flags - * - * Update the page tables using the CP (SI). 
- */ -void si_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - uint64_t value; - unsigned ndw; - - if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { - while (count) { - ndw = 2 + count * 2; - if (ndw > 0x3FFE) - ndw = 0x3FFE; - - ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw); - ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(1)); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - for (; ndw > 2; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - /* DMA */ - if (flags & RADEON_VM_PAGE_SYSTEM) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= r600_flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = r600_flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; - } - } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); - } -} - void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) { struct radeon_ring *ring = &rdev->ring[ridx]; @@ -4802,32 +4849,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) radeon_ring_write(ring, 0x0); } -void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) -{ - struct radeon_ring *ring = &rdev->ring[ridx]; - - if (vm == NULL) - return; - - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - if (vm->id < 8) { - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); - } else { - radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); - } - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - - /* flush hdp cache */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); - 
radeon_ring_write(ring, 1); - - /* bits 0-7 are the VM contexts0-7 */ - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); - radeon_ring_write(ring, 1 << vm->id); -} - /* * Power and clock gating */ @@ -4895,7 +4916,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev, WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2); } -static void si_init_uvd_internal_cg(struct radeon_device *rdev) +void si_init_uvd_internal_cg(struct radeon_device *rdev) { bool hw_mode = true; @@ -4938,7 +4959,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable) u32 data, orig; orig = data = RREG32(DMA_PG); - if (enable) + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA)) data |= PG_CNTL_ENABLE; else data &= ~PG_CNTL_ENABLE; @@ -4962,7 +4983,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev, { u32 tmp; - if (enable) { + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); WREG32(RLC_TTOP_D, tmp); @@ -5065,9 +5086,9 @@ static void si_enable_cgcg(struct radeon_device *rdev, orig = data = RREG32(RLC_CGCG_CGLS_CTRL); - si_enable_gui_idle_interrupt(rdev, enable); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) { + si_enable_gui_idle_interrupt(rdev, true); - if (enable) { WREG32(RLC_GCPM_GENERAL_3, 0x00000080); tmp = si_halt_rlc(rdev); @@ -5084,6 +5105,8 @@ static void si_enable_cgcg(struct radeon_device *rdev, data |= CGCG_EN | CGLS_EN; } else { + si_enable_gui_idle_interrupt(rdev, false); + RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); RREG32(CB_CGTT_SCLK_CTRL); @@ -5101,16 +5124,18 @@ static void si_enable_mgcg(struct radeon_device *rdev, { u32 data, orig, tmp = 0; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) { orig = data = RREG32(CGTS_SM_CTRL_REG); data = 0x96940200; if (orig != data) WREG32(CGTS_SM_CTRL_REG, data); - orig = data = RREG32(CP_MEM_SLP_CNTL); - data |= CP_MEM_LS_EN; - if (orig != data) - WREG32(CP_MEM_SLP_CNTL, data); + if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) { + orig = data = RREG32(CP_MEM_SLP_CNTL); + data |= CP_MEM_LS_EN; + if (orig != data) + WREG32(CP_MEM_SLP_CNTL, data); + } orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); data &= 0xffffffc0; @@ -5155,7 +5180,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev, { u32 orig, data, tmp; - if (enable) { + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) { tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL); tmp |= 0x3fff; WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp); @@ -5203,7 +5228,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable) + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS)) data |= MC_LS_ENABLE; else data &= ~MC_LS_ENABLE; @@ -5212,230 +5237,301 @@ static void si_enable_mc_ls(struct radeon_device *rdev, } } - -static void si_init_cg(struct radeon_device *rdev) +static void si_enable_mc_mgcg(struct radeon_device *rdev, + bool enable) { - bool has_uvd = true; + int i; + u32 orig, data; - si_enable_mgcg(rdev, true); - si_enable_cgcg(rdev, true); - /* disable MC LS on Tahiti */ - if (rdev->family == CHIP_TAHITI) - si_enable_mc_ls(rdev, false); - if (has_uvd) { - si_enable_uvd_mgcg(rdev, true); - si_init_uvd_internal_cg(rdev); + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (rdev->cg_flags & 
RADEON_CG_SUPPORT_MC_MGCG)) + data |= MC_CG_ENABLE; + else + data &= ~MC_CG_ENABLE; + if (data != orig) + WREG32(mc_cg_registers[i], data); } } -static void si_fini_cg(struct radeon_device *rdev) +static void si_enable_dma_mgcg(struct radeon_device *rdev, + bool enable) { - bool has_uvd = true; + u32 orig, data, offset; + int i; - if (has_uvd) - si_enable_uvd_mgcg(rdev, false); - si_enable_cgcg(rdev, false); - si_enable_mgcg(rdev, false); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) { + for (i = 0; i < 2; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data &= ~MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + WREG32(DMA_CLK_CTRL + offset, 0x00000100); + } + } else { + for (i = 0; i < 2; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data |= MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + + orig = data = RREG32(DMA_CLK_CTRL + offset); + data = 0xff000000; + if (data != orig) + WREG32(DMA_CLK_CTRL + offset, data); + } + } } -static void si_init_pg(struct radeon_device *rdev) +static void si_enable_bif_mgls(struct radeon_device *rdev, + bool enable) { - bool has_pg = false; + u32 orig, data; - /* only cape verde supports PG */ - if (rdev->family == CHIP_VERDE) - has_pg = true; + orig = data = RREG32_PCIE(PCIE_CNTL2); - if (has_pg) { - si_init_ao_cu_mask(rdev); - si_init_dma_pg(rdev); - si_enable_dma_pg(rdev, true); - si_init_gfx_cgpg(rdev); - si_enable_gfx_cgpg(rdev, true); - } else { - WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); - WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); - } + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS)) + data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN; + else + data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN | + REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN); + + if (orig != data) + WREG32_PCIE(PCIE_CNTL2, data); } -static void si_fini_pg(struct radeon_device *rdev) +static void si_enable_hdp_mgcg(struct radeon_device *rdev, + bool enable) { - bool has_pg = false; + u32 orig, data; - /* only cape verde supports PG */ - if (rdev->family == CHIP_VERDE) - has_pg = true; + orig = data = RREG32(HDP_HOST_PATH_CNTL); - if (has_pg) { - si_enable_dma_pg(rdev, false); - si_enable_gfx_cgpg(rdev, false); - } + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG)) + data &= ~CLOCK_GATING_DIS; + else + data |= CLOCK_GATING_DIS; + + if (orig != data) + WREG32(HDP_HOST_PATH_CNTL, data); } -/* - * RLC - */ -void si_rlc_fini(struct radeon_device *rdev) +static void si_enable_hdp_ls(struct radeon_device *rdev, + bool enable) { - int r; - - /* save restore block */ - if (rdev->rlc.save_restore_obj) { - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) - dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); - radeon_bo_unpin(rdev->rlc.save_restore_obj); - radeon_bo_unreserve(rdev->rlc.save_restore_obj); + u32 orig, data; - radeon_bo_unref(&rdev->rlc.save_restore_obj); - rdev->rlc.save_restore_obj = NULL; - } + orig = data = RREG32(HDP_MEM_POWER_LS); - /* clear state block */ - if (rdev->rlc.clear_state_obj) { - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) - dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); - 
radeon_bo_unpin(rdev->rlc.clear_state_obj); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); + if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS)) + data |= HDP_LS_ENABLE; + else + data &= ~HDP_LS_ENABLE; - radeon_bo_unref(&rdev->rlc.clear_state_obj); - rdev->rlc.clear_state_obj = NULL; - } + if (orig != data) + WREG32(HDP_MEM_POWER_LS, data); } -#define RLC_CLEAR_STATE_END_MARKER 0x00000001 - -int si_rlc_init(struct radeon_device *rdev) +static void si_update_cg(struct radeon_device *rdev, + u32 block, bool enable) { - volatile u32 *dst_ptr; - u32 dws, data, i, j, k, reg_num; - u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index; - u64 reg_list_mc_addr; - const struct cs_section_def *cs_data = si_cs_data; - int r; - - /* save restore block */ - if (rdev->rlc.save_restore_obj == NULL) { - r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, - &rdev->rlc.save_restore_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); - return r; + if (block & RADEON_CG_BLOCK_GFX) { + si_enable_gui_idle_interrupt(rdev, false); + /* order matters! */ + if (enable) { + si_enable_mgcg(rdev, true); + si_enable_cgcg(rdev, true); + } else { + si_enable_cgcg(rdev, false); + si_enable_mgcg(rdev, false); } + si_enable_gui_idle_interrupt(rdev, true); } - r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - si_rlc_fini(rdev); - return r; + if (block & RADEON_CG_BLOCK_MC) { + si_enable_mc_mgcg(rdev, enable); + si_enable_mc_ls(rdev, enable); } - r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.save_restore_gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); - si_rlc_fini(rdev); - return r; + + if (block & RADEON_CG_BLOCK_SDMA) { + si_enable_dma_mgcg(rdev, enable); } - if (rdev->family == CHIP_VERDE) { - r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr); - if (r) { - dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); - si_rlc_fini(rdev); - return r; - } - /* write the sr buffer */ - dst_ptr = rdev->rlc.sr_ptr; - for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) { - dst_ptr[i] = verde_rlc_save_restore_register_list[i]; - } - radeon_bo_kunmap(rdev->rlc.save_restore_obj); + if (block & RADEON_CG_BLOCK_BIF) { + si_enable_bif_mgls(rdev, enable); } - radeon_bo_unreserve(rdev->rlc.save_restore_obj); - /* clear state block */ - reg_list_num = 0; - dws = 0; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_list_num++; - dws += cs_data[i].section[j].reg_count; + if (block & RADEON_CG_BLOCK_UVD) { + if (rdev->has_uvd) { + si_enable_uvd_mgcg(rdev, enable); } } - reg_list_blk_index = (3 * reg_list_num + 2); - dws += reg_list_blk_index; - if (rdev->rlc.clear_state_obj == NULL) { - r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; - } + if (block & RADEON_CG_BLOCK_HDP) { + si_enable_hdp_mgcg(rdev, enable); + si_enable_hdp_ls(rdev, enable); } - r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - si_rlc_fini(rdev); - return r; +} + +static void si_init_cg(struct radeon_device *rdev) +{ + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + 
RADEON_CG_BLOCK_HDP), true); + if (rdev->has_uvd) { + si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); + si_init_uvd_internal_cg(rdev); } - r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->rlc.clear_state_gpu_addr); - if (r) { +} - radeon_bo_unreserve(rdev->rlc.clear_state_obj); - dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; +static void si_fini_cg(struct radeon_device *rdev) +{ + if (rdev->has_uvd) { + si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); } - r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr); - if (r) { - dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); - si_rlc_fini(rdev); - return r; + si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | + RADEON_CG_BLOCK_MC | + RADEON_CG_BLOCK_SDMA | + RADEON_CG_BLOCK_BIF | + RADEON_CG_BLOCK_HDP), false); +} + +u32 si_get_csb_size(struct radeon_device *rdev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return 0; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } } - /* set up the cs buffer */ - dst_ptr = rdev->rlc.cs_ptr; - reg_list_hdr_blk_index = 0; - reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4); - data = upper_32_bits(reg_list_mc_addr); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - for (i = 0; cs_data[i].section != NULL; i++) { - for (j = 0; cs_data[i].section[j].extent != NULL; j++) { - reg_num = cs_data[i].section[j].reg_count; - data = reg_list_mc_addr & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff; - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - data = 0x08000000 | (reg_num * 4); - dst_ptr[reg_list_hdr_blk_index] = data; - reg_list_hdr_blk_index++; - - for (k = 0; k < reg_num; k++) { - data = cs_data[i].section[j].extent[k]; - dst_ptr[reg_list_blk_index + k] = data; + /* pa_sc_raster_config */ + count += 3; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (rdev->rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; } - reg_list_mc_addr += reg_num * 4; - reg_list_blk_index += reg_num; } } - dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER; - 
radeon_bo_kunmap(rdev->rlc.clear_state_obj); - radeon_bo_unreserve(rdev->rlc.clear_state_obj); + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); + buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + switch (rdev->family) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + buffer[count++] = cpu_to_le32(0x2a00126a); + break; + case CHIP_VERDE: + buffer[count++] = cpu_to_le32(0x0000124a); + break; + case CHIP_OLAND: + buffer[count++] = cpu_to_le32(0x00000082); + break; + case CHIP_HAINAN: + buffer[count++] = cpu_to_le32(0x00000000); + break; + default: + buffer[count++] = cpu_to_le32(0x00000000); + break; + } - return 0; + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); } -static void si_rlc_reset(struct radeon_device *rdev) +static void si_init_pg(struct radeon_device *rdev) +{ + if (rdev->pg_flags) { + if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) { + si_init_dma_pg(rdev); + } + si_init_ao_cu_mask(rdev); + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { + si_init_gfx_cgpg(rdev); + } else { + WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + } + si_enable_dma_pg(rdev, true); + si_enable_gfx_cgpg(rdev, true); + } else { + WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); + } +} + +static void si_fini_pg(struct radeon_device *rdev) +{ + if (rdev->pg_flags) { + si_enable_dma_pg(rdev, false); + si_enable_gfx_cgpg(rdev, false); + } +} + +/* + * RLC + */ +void si_rlc_reset(struct radeon_device *rdev) { u32 tmp = RREG32(GRBM_SOFT_RESET); @@ -5565,7 +5661,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) { u32 tmp; - WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + tmp = RREG32(CP_INT_CNTL_RING0) & + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + WREG32(CP_INT_CNTL_RING0, tmp); WREG32(CP_INT_CNTL_RING1, 0); WREG32(CP_INT_CNTL_RING2, 0); tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -5600,7 +5698,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) } if (!ASIC_IS_NODCE(rdev)) { - WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DAC_AUTODETECT_INT_CONTROL, 0); tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD1_INT_CONTROL, tmp); @@ -5651,7 +5749,7 @@ static int si_irq_init(struct radeon_device *rdev) WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); - rb_bufsz = drm_order(rdev->ih.ring_size / 4); + rb_bufsz = order_base_2(rdev->ih.ring_size / 4); ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | @@ -5690,12 +5788,11 @@ static int si_irq_init(struct radeon_device *rdev) int si_irq_set(struct radeon_device *rdev) { - u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; + u32 cp_int_cntl; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; u32 thermal_int = 0; @@ -5711,6 +5808,9 @@ int si_irq_set(struct radeon_device *rdev) return 0; } + cp_int_cntl = 
RREG32(CP_INT_CNTL_RING0) & + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + if (!ASIC_IS_NODCE(rdev)) { hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; @@ -5831,16 +5931,22 @@ int si_irq_set(struct radeon_device *rdev) } if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (!ASIC_IS_NODCE(rdev)) { @@ -5997,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -6058,7 +6165,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -6084,7 +6191,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } @@ -6110,7 +6217,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_flip(rdev, 2); + radeon_crtc_handle_vblank(rdev, 2); rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } @@ -6136,7 +6243,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_flip(rdev, 3); + radeon_crtc_handle_vblank(rdev, 3); rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } @@ -6162,7 +6269,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_flip(rdev, 4); + radeon_crtc_handle_vblank(rdev, 4); rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } @@ -6188,7 +6295,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_flip(rdev, 5); + radeon_crtc_handle_vblank(rdev, 5); rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } @@ -6204,6 +6311,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + 
radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -6253,18 +6369,24 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 146: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); si_vm_decode_fault(rdev, status, addr); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 176: /* RINGID0 CP_INT */ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); @@ -6335,80 +6457,6 @@ restart_ih: return IRQ_HANDLED; } -/** - * si_copy_dma - copy pages using the DMA engine - * - * @rdev: radeon_device pointer - * @src_offset: src GPU address - * @dst_offset: dst GPU address - * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object - * - * Copy GPU paging using the DMA engine (SI). - * Used by the radeon ttm implementation to move pages if - * registered as the asic copy callback. - */ -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) -{ - struct radeon_semaphore *sem = NULL; - int ring_index = rdev->asic->copy.dma_ring_index; - struct radeon_ring *ring = &rdev->ring[ring_index]; - u32 size_in_bytes, cur_size_in_bytes; - int i, num_loops; - int r = 0; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - - size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); - num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); - r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - radeon_semaphore_free(rdev, &sem, NULL); - return r; - } - - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } - - for (i = 0; i < num_loops; i++) { - cur_size_in_bytes = size_in_bytes; - if (cur_size_in_bytes > 0xFFFFF) - cur_size_in_bytes = 0xFFFFF; - size_in_bytes -= cur_size_in_bytes; - radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); - radeon_ring_write(ring, dst_offset & 0xffffffff); - radeon_ring_write(ring, src_offset & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); - src_offset += cur_size_in_bytes; - dst_offset += cur_size_in_bytes; - } - - r = radeon_fence_emit(rdev, fence, ring->idx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, *fence); - - return r; -} - /* * startup/shutdown callbacks */ @@ -6422,33 +6470,34 @@ static int si_startup(struct radeon_device *rdev) /* enable aspm */ si_program_aspm(rdev); - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || - !rdev->rlc_fw || !rdev->mc_fw) { - r = si_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - - r = 
si_mc_load_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load MC firmware!\n"); - return r; - } - + /* scratch needs to be initialized before MC */ r = r600_vram_scratch_init(rdev); if (r) return r; si_mc_program(rdev); + + if (!rdev->pm.dpm_enabled) { + r = si_mc_load_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load MC firmware!\n"); + return r; + } + } + r = si_pcie_gart_enable(rdev); if (r) return r; si_gpu_init(rdev); /* allocate rlc buffers */ - r = si_rlc_init(rdev); + if (rdev->family == CHIP_VERDE) { + rdev->rlc.reg_list = verde_rlc_save_restore_register_list; + rdev->rlc.reg_list_size = + (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list); + } + rdev->rlc.cs_data = si_cs_data; + r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); return r; @@ -6490,7 +6539,7 @@ static int si_startup(struct radeon_device *rdev) } if (rdev->has_uvd) { - r = rv770_uvd_resume(rdev); + r = uvd_v2_2_resume(rdev); if (!r) { r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); @@ -6518,38 +6567,31 @@ static int si_startup(struct radeon_device *rdev) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, - CP_RB0_RPTR, CP_RB0_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, - CP_RB1_RPTR, CP_RB1_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, - CP_RB2_RPTR, CP_RB2_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + RADEON_CP_PACKET2); if (r) return r; ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, - DMA_RB_RPTR + DMA0_REGISTER_OFFSET, - DMA_RB_WPTR + DMA0_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, - DMA_RB_RPTR + DMA1_REGISTER_OFFSET, - DMA_RB_WPTR + DMA1_REGISTER_OFFSET, - 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); if (r) return r; @@ -6567,12 +6609,10 @@ static int si_startup(struct radeon_device *rdev) if (rdev->has_uvd) { ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, - UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + RADEON_CP_PACKET2); if (!r) - r = r600_uvd_init(rdev); + r = uvd_v1_0_init(rdev); if (r) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } @@ -6590,6 +6630,10 @@ static int si_startup(struct radeon_device *rdev) return r; } + r = dce6_audio_init(rdev); + if (r) + return r; + return 0; } @@ -6607,6 +6651,9 @@ int si_resume(struct radeon_device *rdev) /* init golden registers */ si_init_golden_registers(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); + rdev->accel_working = true; r = si_startup(rdev); if (r) { @@ -6621,13 +6668,17 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { + radeon_pm_suspend(rdev); + dce6_audio_fini(rdev); radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); 
cayman_dma_stop(rdev); if (rdev->has_uvd) { - r600_uvd_rbc_stop(rdev); + uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); } + si_fini_pg(rdev); + si_fini_cg(rdev); si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); @@ -6691,6 +6742,18 @@ int si_init(struct radeon_device *rdev) if (r) return r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || + !rdev->rlc_fw || !rdev->mc_fw) { + r = si_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + + /* Initialize power management */ + radeon_pm_init(rdev); + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -6734,7 +6797,7 @@ int si_init(struct radeon_device *rdev) si_cp_fini(rdev); cayman_dma_fini(rdev); si_irq_fini(rdev); - si_rlc_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); @@ -6757,18 +6820,21 @@ int si_init(struct radeon_device *rdev) void si_fini(struct radeon_device *rdev) { + radeon_pm_fini(rdev); si_cp_fini(rdev); cayman_dma_fini(rdev); - si_irq_fini(rdev); - si_rlc_fini(rdev); - si_fini_cg(rdev); si_fini_pg(rdev); + si_fini_cg(rdev); + si_irq_fini(rdev); + sumo_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - if (rdev->has_uvd) + if (rdev->has_uvd) { + uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + } si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); |
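
Note for readers skimming the hunks above: one of the main functional changes in si_init_microcode() is that the driver now requests a newer "<chip>_mc2.bin" MC firmware image first and quietly falls back to the older "<chip>_mc.bin" if it is not installed, accepting either blob size. A minimal sketch of that request-with-fallback pattern, simplified from the hunk (the helper name is hypothetical and the surrounding error handling and struct radeon_device context are elided):

    #include <linux/firmware.h>
    #include <linux/device.h>

    /* Hypothetical helper: try the new-style MC ucode, fall back to the old image. */
    static int request_mc_firmware(const struct firmware **fw, const char *chip,
                                   struct device *dev)
    {
            char fw_name[30];
            int err;

            snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip);
            err = request_firmware(fw, fw_name, dev);
            if (err) {
                    /* older firmware package: fall back to the original MC image */
                    snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip);
                    err = request_firmware(fw, fw_name, dev);
            }
            return err;
    }

Correspondingly, si_mc_load_microcode() stops hard-coding SI_MC_UCODE_SIZE / OLAND_MC_UCODE_SIZE and derives the word count from whichever blob was loaded (ucode_size = rdev->mc_fw->size / 4), so both images go through the same upload path.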
