Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  |  329
1 file changed, 223 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 555164e270a..15e4f28015e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -146,6 +146,7 @@ extern u32 si_get_csb_size(struct radeon_device *rdev);
 extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
 extern u32 cik_get_csb_size(struct radeon_device *rdev);
 extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
 static const u32 evergreen_golden_registers[] =
 {
@@ -188,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
 	0x8c1c, 0xffffffff, 0x00001010,
 	0x28350, 0xffffffff, 0x00000000,
 	0xa008, 0xffffffff, 0x00010000,
-	0x5cc, 0xffffffff, 0x00000001,
+	0x5c4, 0xffffffff, 0x00000001,
 	0x9508, 0xffffffff, 0x00000002,
 	0x913c, 0x0000000f, 0x0000000a
 };
@@ -475,7 +476,7 @@ static const u32 cedar_golden_registers[] =
 	0x8c1c, 0xffffffff, 0x00001010,
 	0x28350, 0xffffffff, 0x00000000,
 	0xa008, 0xffffffff, 0x00010000,
-	0x5cc, 0xffffffff, 0x00000001,
+	0x5c4, 0xffffffff, 0x00000001,
 	0x9508, 0xffffffff, 0x00000002
 };
@@ -634,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
 static const u32 supersumo_golden_registers[] =
 {
 	0x5eb4, 0xffffffff, 0x00000002,
-	0x5cc, 0xffffffff, 0x00000001,
+	0x5c4, 0xffffffff, 0x00000001,
 	0x7030, 0xffffffff, 0x00000011,
 	0x7c30, 0xffffffff, 0x00000011,
 	0x6104, 0x01000300, 0x00000000,
@@ -718,7 +719,7 @@ static const u32 sumo_golden_registers[] =
 static const u32 wrestler_golden_registers[] =
 {
 	0x5eb4, 0xffffffff, 0x00000002,
-	0x5cc, 0xffffffff, 0x00000001,
+	0x5c4, 0xffffffff, 0x00000001,
 	0x7030, 0xffffffff, 0x00000011,
 	0x7c30, 0xffffffff, 0x00000011,
 	0x6104, 0x01000300, 0x00000000,
@@ -1174,23 +1175,72 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
-	u16 ctl, v;
-	int err;
-
-	err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
-	if (err)
-		return;
-
-	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+	int readrq;
+	u16 v;
+	readrq = pcie_get_readrq(rdev->pdev);
+	v = ffs(readrq) - 8;
 	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
 	 * to avoid hangs or perfomance issues
 	 */
-	if ((v == 0) || (v == 6) || (v == 7)) {
-		ctl &= ~PCI_EXP_DEVCTL_READRQ;
-		ctl |= (2 << 12);
-		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
+	if ((v == 0) || (v == 6) || (v == 7))
+		pcie_set_readrq(rdev->pdev, 512);
+}
+
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
 	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
 }
 
 static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
@@ -1251,36 +1301,6 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 }
 
 /**
- * radeon_irq_kms_pflip_irq_get - pre-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to prepare for pageflip on
- *
- * Pre-pageflip callback (evergreen+).
- * Enables the pageflip irq (vblank irq).
- */
-void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
-{
-	/* enable the pflip int */
-	radeon_irq_kms_pflip_irq_get(rdev, crtc);
-}
-
-/**
- * evergreen_post_page_flip - pos-pageflip callback.
- *
- * @rdev: radeon_device pointer
- * @crtc: crtc to cleanup pageflip on
- *
- * Post-pageflip callback (evergreen+).
- * Disables the pageflip irq (vblank irq).
- */
-void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
-{
-	/* disable the pflip int */
-	radeon_irq_kms_pflip_irq_put(rdev, crtc);
-}
-
-/**
  * evergreen_page_flip - pageflip callback.
  *
  * @rdev: radeon_device pointer
@@ -1293,7 +1313,7 @@ void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
  * double buffered update to take place.
  * Returns the current update pending status.
  */
-u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -1325,9 +1345,23 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 	/* Unlock the lock, so double-buffering can take place inside vblank */
 	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
 	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+}
+
+/**
+ * evergreen_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Returns the current update pending status.
+ */
+bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
 	/* Return current update_pending status: */
-	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+	return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
+		EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
 }
 
 /* get temperature in millidegrees */
@@ -1630,7 +1664,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 	case RADEON_HPD_6:
 		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
 			connected = true;
-			break;
+		break;
 	default:
 		break;
 	}
@@ -2608,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
 	for (i = 0; i < rdev->num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
 			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x3) != 0) {
-				tmp &= ~0x3;
+			if ((tmp & 0x7) != 3) {
+				tmp &= ~0x7;
+				tmp |= 0x3;
 				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
 			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -2940,8 +2975,6 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-	ring->rptr = RREG32(CP_RB_RPTR);
-
 	evergreen_cp_start(rdev);
 	ring->ready = true;
 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -3131,7 +3164,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sx_max_export_size = 256;
 		rdev->config.evergreen.sx_max_export_pos_size = 64;
 		rdev->config.evergreen.sx_max_export_smx_size = 192;
-		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.max_hw_contexts = 4;
 		rdev->config.evergreen.sq_num_cf_insts = 2;
 
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
@@ -3305,6 +3338,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 			disabled_rb_mask &= ~(1 << i);
 	}
 
+	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+		u32 simd_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
+		simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
+		tmp <<= 16;
+		tmp |= simd_disable_bitmap;
+	}
+	rdev->config.evergreen.active_simds = hweight32(~tmp);
+
 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -3818,6 +3863,48 @@ static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 	evergreen_print_gpu_status_regs(rdev);
 }
 
+void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp, i;
+
+	dev_info(rdev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+	udelay(50);
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc */
+	r600_rlc_stop(rdev);
+
+	udelay(50);
+
+	/* set mclk/sclk to bypass */
+	rv770_set_clk_bypass_mode(rdev);
+	/* disable BM */
+	pci_clear_master(rdev->pdev);
+	/* disable mem access */
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
+	}
+	/* reset */
+	radeon_pci_config_reset(rdev);
+	/* wait for asic to come out of reset */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+}
+
 int evergreen_asic_reset(struct radeon_device *rdev)
 {
 	u32 reset_mask;
@@ -3827,10 +3914,17 @@ int evergreen_asic_reset(struct radeon_device *rdev)
 	if (reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, true);
 
+	/* try soft reset */
 	evergreen_gpu_soft_reset(rdev, reset_mask);
 
 	reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
+	/* try pci config reset */
+	if (reset_mask && radeon_hard_reset)
+		evergreen_gpu_pci_config_reset(rdev);
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
 	if (!reset_mask)
 		r600_set_bios_scratch_engine_hung(rdev, false);
@@ -3853,11 +3947,9 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
 	if (!(reset_mask & (RADEON_RESET_GFX |
 			    RADEON_RESET_COMPUTE |
 			    RADEON_RESET_CP))) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 	}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
@@ -3963,7 +4055,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
 		if (rdev->family >= CHIP_TAHITI) {
 			/* SI */
 			for (i = 0; i < rdev->rlc.reg_list_size; i++)
-				dst_ptr[i] = src_ptr[i];
+				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
 		} else {
 			/* ON/LN/TN */
 			/* format:
@@ -3977,10 +4069,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
 				if (i < dws)
 					data |= (src_ptr[i] >> 2) << 16;
 				j = (((i - 1) * 3) / 2);
-				dst_ptr[j] = data;
+				dst_ptr[j] = cpu_to_le32(data);
 			}
 			j = ((i * 3) / 2);
-			dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
 		}
 		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4042,40 +4134,40 @@ int sumo_rlc_init(struct radeon_device *rdev)
 			cik_get_csb_buffer(rdev, dst_ptr);
 		} else if (rdev->family >= CHIP_TAHITI) {
 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
-			dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
-			dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
-			dst_ptr[2] = rdev->rlc.clear_state_size;
+			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
 			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
 		} else {
 			reg_list_hdr_blk_index = 0;
 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
 			data = upper_32_bits(reg_list_mc_addr);
-			dst_ptr[reg_list_hdr_blk_index] = data;
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 			reg_list_hdr_blk_index++;
 			for (i = 0; cs_data[i].section != NULL; i++) {
 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
 					reg_num = cs_data[i].section[j].reg_count;
 					data = reg_list_mc_addr & 0xffffffff;
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 					data = 0x08000000 | (reg_num * 4);
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 					for (k = 0; k < reg_num; k++) {
 						data = cs_data[i].section[j].extent[k];
-						dst_ptr[reg_list_blk_index + k] = data;
+						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
 					}
 					reg_list_mc_addr += reg_num * 4;
 					reg_list_blk_index += reg_num;
 				}
 			}
-			dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
 		}
 		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
@@ -4249,8 +4341,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
 	}
 
-	/* only one DAC on DCE6 */
-	if (!ASIC_IS_DCE6(rdev))
+	/* only one DAC on DCE5 */
+	if (!ASIC_IS_DCE5(rdev))
 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -4276,7 +4368,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
-	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
 	u32 dma_cntl, dma_cntl1 = 0;
 	u32 thermal_int = 0;
@@ -4459,15 +4550,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
 	}
 
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+	       GRPH_PFLIP_INT_MASK);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+	       GRPH_PFLIP_INT_MASK);
 	if (rdev->num_crtc >= 4) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
 	}
 	if (rdev->num_crtc >= 6) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
 	}
 
 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4659,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
@@ -4710,7 +4808,7 @@ restart_ih:
 						wake_up(&rdev->irq.vblank_queue);
 					}
 					if (atomic_read(&rdev->irq.pflip[0]))
-						radeon_crtc_handle_flip(rdev, 0);
+						radeon_crtc_handle_vblank(rdev, 0);
 					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
@@ -4736,7 +4834,7 @@ restart_ih:
 						wake_up(&rdev->irq.vblank_queue);
 					}
 					if (atomic_read(&rdev->irq.pflip[1]))
-						radeon_crtc_handle_flip(rdev, 1);
+						radeon_crtc_handle_vblank(rdev, 1);
 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
@@ -4762,7 +4860,7 @@ restart_ih:
 						wake_up(&rdev->irq.vblank_queue);
 					}
 					if (atomic_read(&rdev->irq.pflip[2]))
-						radeon_crtc_handle_flip(rdev, 2);
+						radeon_crtc_handle_vblank(rdev, 2);
 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
 				}
@@ -4788,7 +4886,7 @@ restart_ih:
 						wake_up(&rdev->irq.vblank_queue);
 					}
 					if (atomic_read(&rdev->irq.pflip[3]))
-						radeon_crtc_handle_flip(rdev, 3);
+						radeon_crtc_handle_vblank(rdev, 3);
 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
 				}
@@ -4814,7 +4912,7 @@ restart_ih:
 						wake_up(&rdev->irq.vblank_queue);
 					}
 					if (atomic_read(&rdev->irq.pflip[4]))
-						radeon_crtc_handle_flip(rdev, 4);
+						radeon_crtc_handle_vblank(rdev, 4);
 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
 				}
@@ -4840,7 +4938,7 @@ restart_ih:
 						wake_up(&rdev->irq.vblank_queue);
 					}
 					if (atomic_read(&rdev->irq.pflip[5]))
-						radeon_crtc_handle_flip(rdev, 5);
+						radeon_crtc_handle_vblank(rdev, 5);
 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
 				}
@@ -4856,6 +4954,15 @@ restart_ih:
 				break;
 			}
 			break;
+		case 8: /* D1 page flip */
+		case 10: /* D2 page flip */
+		case 12: /* D3 page flip */
+		case 14: /* D4 page flip */
+		case 16: /* D5 page flip */
+		case 18: /* D6 page flip */
+			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+			radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+			break;
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
@@ -4961,14 +5068,16 @@ restart_ih:
 		case 147:
 			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
 			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			if (addr == 0x0 && status == 0x0)
+				break;
 			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
 				addr);
 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 				status);
 			cayman_vm_decode_fault(rdev, status, addr);
-			/* reset addr and status */
-			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
 			break;
 		case 176: /* CP_INT in ring buffer */
 		case 177: /* CP_INT in IB1 */
@@ -5060,27 +5169,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 
 	evergreen_mc_program(rdev);
 
-	if (ASIC_IS_DCE5(rdev)) {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
-			r = ni_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
+	if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
 		r = ni_mc_load_microcode(rdev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
 			return r;
 		}
-	} else {
-		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-			r = r600_init_microcode(rdev);
-			if (r) {
-				DRM_ERROR("Failed to load firmware!\n");
-				return r;
-			}
-		}
 	}
 
 	if (rdev->flags & RADEON_IS_AGP) {
@@ -5150,14 +5244,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
 			     RADEON_CP_PACKET2);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
-			     DMA_RB_RPTR, DMA_RB_WPTR,
 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0));
 	if (r)
 		return r;
@@ -5175,7 +5267,6 @@ static int evergreen_startup(struct radeon_device *rdev)
 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 	if (ring->ring_size) {
 		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
 				     RADEON_CP_PACKET2);
 		if (!r)
 			r = uvd_v1_0_init(rdev);
@@ -5218,6 +5309,9 @@ int evergreen_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	evergreen_init_golden_registers(rdev);
 
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
+
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
 	if (r) {
@@ -5232,6 +5326,7 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
+	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_suspend(rdev);
@@ -5308,6 +5403,27 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
@@ -5360,6 +5476,7 @@ int evergreen_init(struct radeon_device *rdev)
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+	radeon_pm_fini(rdev);
 	r600_audio_fini(rdev);
 	r700_cp_fini(rdev);
 	r600_dma_fini(rdev);
@@ -5369,9 +5486,9 @@ void evergreen_fini(struct radeon_device *rdev)
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	evergreen_pcie_gart_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
+	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
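
For reference, a stand-alone user-space sketch of the MAX_READ_REQUEST_SIZE arithmetic used in the hunk at @@ -1174 above (illustrative only, not driver code and not part of the commit): ffs(readrq) - 8 maps the byte size returned by pcie_get_readrq() back to the PCIe DEVCTL encoding (128 -> 0, 256 -> 1, ..., 4096 -> 5), and any encoding the driver's comment calls invalid (0, 6, 7) is rewritten to 512 bytes. The program and the hypothetical file name readrq_demo.c below are purely for demonstration.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same arithmetic as the patched evergreen_fix_pci_max_read_req_size():
 * map a max read request size in bytes to its PCIe DEVCTL encoding.
 */
static int readrq_encoding(int readrq_bytes)
{
	return ffs(readrq_bytes) - 8;	/* 128 -> 0, 256 -> 1, ..., 4096 -> 5 */
}

int main(void)
{
	static const int sizes[] = { 128, 256, 512, 1024, 2048, 4096, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int v = readrq_encoding(sizes[i]);
		int invalid = (v == 0) || (v == 6) || (v == 7);

		printf("readrq %4d -> encoding %d%s\n", sizes[i], v,
		       invalid ? " (treated as invalid, reset to 512)" : "");
	}
	return 0;
}

Built with any C compiler (e.g. cc readrq_demo.c), it prints which request sizes the patched check would rewrite to 512 bytes.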
