Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
 drivers/gpu/drm/radeon/r600.c | 1436 ++++++++++++++++++++++++-----------------
 1 file changed, 723 insertions(+), 713 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 3cb9d608937..3c69f58e46e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -38,18 +37,7 @@
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
-
-#define PFP_UCODE_SIZE 576
-#define PM4_UCODE_SIZE 1792
-#define RLC_UCODE_SIZE 768
-#define R700_PFP_UCODE_SIZE 848
-#define R700_PM4_UCODE_SIZE 1360
-#define R700_RLC_UCODE_SIZE 1024
-#define EVERGREEN_PFP_UCODE_SIZE 1120
-#define EVERGREEN_PM4_UCODE_SIZE 1376
-#define EVERGREEN_RLC_UCODE_SIZE 768
-#define CAYMAN_RLC_UCODE_SIZE 1024
-#define ARUBA_RLC_UCODE_SIZE 1536
+#include "radeon_ucode.h"
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,24 +56,32 @@ MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV730_smc.bin");
+MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
@@ -94,6 +90,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -102,6 +104,79 @@ static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+extern int evergreen_rlc_resume(struct radeon_device *rdev);
+extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
+
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+ return rdev->clock.spll.reference_freq;
+}
+
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ return 0;
+}
+
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ bpc = radeon_get_monitor_bpc(connector);
+ dither = radeon_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ /* not needed for analog */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+ return;
+
+ if (bpc == 0)
+ return;
+
+ switch (bpc) {
+ case 6:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= FMT_SPATIAL_DITHER_EN;
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == RADEON_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
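The bpc switch above reduces the pipeline's deeper color data to the panel depth either by truncation or by spatial dithering. A standalone sketch of the same selection logic, with placeholder bit values rather than the real r600d.h FMT_* definitions:

#include <stdio.h>

/* placeholder bit values, not the real r600d.h FMT_* definitions */
#define FMT_TRUNCATE_EN          (1 << 0)
#define FMT_TRUNCATE_DEPTH       (1 << 4)
#define FMT_SPATIAL_DITHER_EN    (1 << 8)
#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)

static unsigned fmt_bits(int bpc, int dither)
{
    unsigned tmp = 0;

    switch (bpc) {
    case 6: /* squeeze pipeline data down to 6 bpc */
        tmp = dither ? FMT_SPATIAL_DITHER_EN : FMT_TRUNCATE_EN;
        break;
    case 8: /* same, the DEPTH bit selects the 8 bpc variant */
        tmp = dither ? (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH)
                     : (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
        break;
    default: /* 10 bpc and up: nothing to reduce */
        break;
    }
    return tmp;
}

int main(void)
{
    printf("6 bpc dithered:  0x%04x\n", fmt_bits(6, 1));
    printf("8 bpc truncated: 0x%04x\n", fmt_bits(8, 0));
    return 0;
}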
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
@@ -1027,6 +1102,31 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ unsigned long flags;
+ uint32_t r;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+ r = RREG32(R_0028FC_MC_DATA);
+ WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+ return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+ S_0028F8_MC_IND_WR_EN(1));
+ WREG32(R_0028FC_MC_DATA, v);
+ WREG32(R_0028F8_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+}
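rs780_mc_rreg()/rs780_mc_wreg() use the classic index/data register pair: write the target register number into MC_INDEX, then access MC_DATA; the spin_lock_irqsave() pair makes the two-step select-then-access sequence atomic against concurrent users. A hedged user-space analogue with a mock register file (the 0x7F deselect value mirrors the code above, everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t mc_regs[256]; /* mock indirect register space */
static uint32_t mc_index;     /* mock MC_INDEX */

static void wreg_index(uint32_t reg) { mc_index = reg & 0xff; }

static uint32_t mc_rreg(uint32_t reg)
{
    uint32_t r;

    wreg_index(reg);       /* select the register */
    r = mc_regs[mc_index]; /* read MC_DATA */
    wreg_index(0x7F);      /* deselect, as the driver does */
    return r;
}

static void mc_wreg(uint32_t reg, uint32_t v)
{
    wreg_index(reg);       /* select (plus write-enable in the real code) */
    mc_regs[mc_index] = v; /* write MC_DATA */
    wreg_index(0x7F);
}

int main(void)
{
    mc_wreg(0x12, 0xdeadbeef);
    printf("0x%08x\n", mc_rreg(0x12));
    return 0;
}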
+
static void r600_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -1126,7 +1226,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = 0xFFFFFFFF - mc->gtt_end;
+ size_af = mc->mc_mask - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1162,6 +1262,8 @@ static int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -1202,7 +1304,30 @@ static int r600_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB can only be used with UMA memory, so simply disable it
+ * when sideport memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+ }
}
+
radeon_update_bandwidth_info(rdev);
return 0;
}
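The direct-mapping address above is composed from two registers: K8_FB_LOCATION supplies the low 32 bits and the K8_ADDR_EXT field of MC_MISC_UMA_CNTL the upper bits. A quick check of the arithmetic, with register values invented for illustration:

#include <stdio.h>

int main(void)
{
    unsigned long long h_addr = 0x1;        /* invented K8_ADDR_EXT bits */
    unsigned long long l_addr = 0xc0000000; /* invented K8_FB_LOCATION   */
    unsigned long long k8_addr = (h_addr << 32) | l_addr;

    /* 0x1c0000000 = 7 GiB: above 4 GiB, hence the 32-bit non-PAE guard */
    printf("k8_addr = 0x%llx\n", k8_addr);
    return 0;
}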
@@ -1254,207 +1379,381 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
-/* We doesn't check that the GPU really needs a reset we simply do the
- * reset, it's up to the caller to determine if the GPU needs one. We
- * might add an helper function to check that.
- */
-static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
- u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
- S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
- S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
- S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
- S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
- S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
- S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
- S_008010_GUI_ACTIVE(1);
- u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
- S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
- S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
- S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
- S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
- S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
- S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
- S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 tmp;
+ u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return;
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
+ RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
+ RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
+ RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
+ RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
+ RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
+ RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
- /* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[2];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+ crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
- /* Check if any of the rendering block is busy and reset it */
- if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
- (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- tmp = S_008020_SOFT_RESET_CR(1) |
- S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SMX(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1);
- dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
}
- /* Reset CP (we always reset CP) */
- tmp = S_008020_SOFT_RESET_CP(1);
- dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ return true;
+}
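r600_is_display_hung() relies on the fact that a live CRTC keeps advancing its H/V position counter: sample once, then re-sample up to 10 times at 100 us intervals, clearing a CRTC from the suspect set as soon as its counter moves. A minimal sketch of that polling structure with a simulated counter:

#include <stdio.h>

/* stand-in for one CRTC's HV_COUNT register; a live one keeps moving */
static unsigned hv_count;
static unsigned read_hv(void) { return hv_count += 7; }

int main(void)
{
    unsigned first = read_hv();
    int hung = 1;

    for (int j = 0; j < 10 && hung; j++) {
        /* udelay(100) would sit here in the driver */
        if (read_hv() != first)
            hung = 0;
    }
    printf("display %s\n", hung ? "hung" : "alive");
    return 0;
}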
+
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(R_008010_GRBM_STATUS);
+ if (rdev->family >= CHIP_RV770) {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ } else {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ }
+
+ if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+ G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (G_008010_GRBM_EE_BUSY(tmp))
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS);
+ if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (G_000E50_IH_BUSY(tmp))
+ reset_mask |= RADEON_RESET_IH;
+
+ if (G_000E50_SEM_BUSY(tmp))
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (G_000E50_GRBM_RQ_PENDING(tmp))
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (G_000E50_VMC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+ G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+ G_000E50_MCDW_BUSY(tmp))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (r600_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
+ return reset_mask;
}
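The returned mask is a bitfield of suspect blocks that r600_gpu_soft_reset() later translates into GRBM/SRBM soft-reset bits. An illustrative decoder; the flag values here are assumed single bits, the real RADEON_RESET_* constants live in radeon.h:

#include <stdio.h>

/* assumed single-bit flags mirroring the RADEON_RESET_* family */
enum {
    RESET_GFX = 1 << 0, RESET_DMA = 1 << 2, RESET_CP = 1 << 3,
    RESET_RLC = 1 << 6, RESET_IH  = 1 << 8, RESET_MC = 1 << 10,
};

static void decode(unsigned mask)
{
    if (mask & RESET_GFX) printf(" gfx");
    if (mask & RESET_DMA) printf(" dma");
    if (mask & RESET_CP)  printf(" cp");
    if (mask & RESET_RLC) printf(" rlc");
    if (mask & RESET_IH)  printf(" ih");
    if (mask & RESET_MC)  printf(" mc (cleared above: likely just busy)");
    printf("\n");
}

int main(void)
{
    decode(RESET_GFX | RESET_CP | RESET_MC);
    return 0;
}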
-static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
+ struct rv515_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ if (reset_mask == 0)
return;
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ r600_print_gpu_status_regs(rdev);
- /* Reset dma */
+ /* Disable CP parsing/prefetching */
if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ mdelay(50);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ if (rdev->family >= CHIP_RV770)
+ grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ else
+ grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+ S_008020_SOFT_RESET_VGT(1);
+
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ if (rdev->family >= CHIP_RV770)
+ srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+ else
+ srbm_soft_reset |= SOFT_RESET_DMA;
+ }
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+ }
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ mdelay(1);
+
+ rv515_mc_resume(rdev, &save);
udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
+ r600_print_gpu_status_regs(rdev);
}
-static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
+ u32 tmp, i;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+ dev_info(rdev->dev, "GPU pci config reset\n");
- if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
- reset_mask &= ~RADEON_RESET_DMA;
+ /* disable dpm? */
- if (reset_mask == 0)
- return 0;
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
- dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ mdelay(50);
+
+ /* set mclk/sclk to bypass */
+ if (rdev->family >= CHIP_RV770)
+ rv770_set_clk_bypass_mode(rdev);
+ /* disable BM */
+ pci_clear_master(rdev->pdev);
+ /* disable mem access */
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
- r600_gpu_soft_reset_gfx(rdev);
+ /* BIF reset workaround. Not sure if this is needed on 6xx */
+ tmp = RREG32(BUS_CNTL);
+ tmp |= VGA_COHE_SPEC_TIMER_DIS;
+ WREG32(BUS_CNTL, tmp);
- if (reset_mask & RADEON_RESET_DMA)
- r600_gpu_soft_reset_dma(rdev);
+ tmp = RREG32(BIF_SCRATCH0);
- /* Wait a little for things to settle down */
+ /* reset */
+ radeon_pci_config_reset(rdev);
mdelay(1);
- rv515_mc_resume(rdev, &save);
- return 0;
+ /* BIF reset workaround. Not sure if this is needed on 6xx */
+ tmp = SOFT_RESET_BIF;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ mdelay(1);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
+ break;
+ udelay(1);
+ }
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+int r600_asic_reset(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status2;
-
- srbm_status = RREG32(R_000E50_SRBM_STATUS);
- grbm_status = RREG32(R_008010_GRBM_STATUS);
- grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
- if (!G_008010_GUI_ACTIVE(grbm_status)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
+ u32 reset_mask;
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ /* try soft reset */
+ r600_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ /* try pci config reset */
+ if (reset_mask && radeon_hard_reset)
+ r600_gpu_pci_config_reset(rdev);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
}
/**
- * r600_dma_is_lockup - Check if the DMA engine is locked up
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Check if the GFX engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
-bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
- dma_status_reg = RREG32(DMA_STATUS_REG);
- if (dma_status_reg & DMA_IDLE) {
- radeon_ring_lockup_update(ring);
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(rdev, ring);
return false;
}
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
-int r600_asic_reset(struct radeon_device *rdev)
-{
- return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_DMA));
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -1462,12 +1761,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
- u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
- disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
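The guard added above keeps a pathological harvest configuration from disabling every render backend. Worked through with invented numbers:

#include <stdio.h>

int main(void)
{
    unsigned max_rb_num = 4;          /* illustrative: 4 RBs exist */
    unsigned disabled_rb_mask = 0x0f; /* pathological fuses: all 4 off */
    unsigned tmp = disabled_rb_mask | ((0xffu << max_rb_num) & 0xff);

    if ((tmp & 0xff) != 0xff)
        disabled_rb_mask = tmp;       /* normal case: mask off absent RBs */
    else
        printf("guard hit: tmp=0x%02x, keeping old mask\n", tmp);
    printf("disabled_rb_mask=0x%02x\n", disabled_rb_mask);
    return 0;
}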
@@ -1656,6 +1958,9 @@ static void r600_gpu_init(struct radeon_device *rdev)
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
+ tmp = rdev->config.r600.max_simds -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+ rdev->config.r600.active_simds = tmp;
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
@@ -1922,20 +2227,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
*/
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
r = RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
WREG32(PCIE_PORT_DATA, (v));
(void)RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
/*
@@ -1943,7 +2255,8 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -1951,22 +2264,15 @@ void r600_cp_stop(struct radeon_device *rdev)
int r600_init_microcode(struct radeon_device *rdev)
{
- struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
- size_t pfp_req_size, me_req_size, rlc_req_size;
+ const char *smc_chip_name = "RV770";
+ size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
char fw_name[30];
int err;
DRM_DEBUG("\n");
- pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
- err = IS_ERR(pdev);
- if (err) {
- printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
- return -EINVAL;
- }
-
switch (rdev->family) {
case CHIP_R600:
chip_name = "R600";
@@ -2000,32 +2306,51 @@ int r600_init_microcode(struct radeon_device *rdev)
case CHIP_RV770:
chip_name = "RV770";
rlc_chip_name = "R700";
+ smc_chip_name = "RV770";
+ smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
break;
case CHIP_RV730:
- case CHIP_RV740:
chip_name = "RV730";
rlc_chip_name = "R700";
+ smc_chip_name = "RV730";
+ smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
break;
case CHIP_RV710:
chip_name = "RV710";
rlc_chip_name = "R700";
+ smc_chip_name = "RV710";
+ smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
+ break;
+ case CHIP_RV740:
+ chip_name = "RV730";
+ rlc_chip_name = "R700";
+ smc_chip_name = "RV740";
+ smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
break;
case CHIP_CEDAR:
chip_name = "CEDAR";
rlc_chip_name = "CEDAR";
+ smc_chip_name = "CEDAR";
+ smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
break;
case CHIP_REDWOOD:
chip_name = "REDWOOD";
rlc_chip_name = "REDWOOD";
+ smc_chip_name = "REDWOOD";
+ smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
break;
case CHIP_JUNIPER:
chip_name = "JUNIPER";
rlc_chip_name = "JUNIPER";
+ smc_chip_name = "JUNIPER";
+ smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
+ smc_chip_name = "CYPRESS";
+ smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
break;
case CHIP_PALM:
chip_name = "PALM";
@@ -2051,15 +2376,15 @@ int r600_init_microcode(struct radeon_device *rdev)
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
- pfp_req_size = PFP_UCODE_SIZE * 4;
- me_req_size = PM4_UCODE_SIZE * 12;
- rlc_req_size = RLC_UCODE_SIZE * 4;
+ pfp_req_size = R600_PFP_UCODE_SIZE * 4;
+ me_req_size = R600_PM4_UCODE_SIZE * 12;
+ rlc_req_size = R600_RLC_UCODE_SIZE * 4;
}
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
@@ -2071,7 +2396,7 @@ int r600_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
@@ -2082,7 +2407,7 @@ int r600_init_microcode(struct radeon_device *rdev)
}
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
@@ -2092,9 +2417,25 @@ int r600_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}
-out:
- platform_device_unregister(pdev);
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ }
+out:
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
@@ -2106,10 +2447,42 @@ out:
rdev->me_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
}
return err;
}
+u32 r600_gfx_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = rdev->wb.wb[ring->rptr_offs/4];
+ else
+ rptr = RREG32(R600_CP_RB_RPTR);
+
+ return rptr;
+}
+
+u32 r600_gfx_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ u32 wptr;
+
+ wptr = RREG32(R600_CP_RB_WPTR);
+
+ return wptr;
+}
+
+void r600_gfx_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(R600_CP_RB_WPTR, ring->wptr);
+ (void)RREG32(R600_CP_RB_WPTR);
+}
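These helpers expose the GFX ring's read/write pointers to the new common ring code. As a hedged sketch of the convention they serve: the host produces commands at wptr, the CP consumes from rptr, and both wrap through a power-of-two ring via ptr_mask. Pure illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16 /* dwords, power of two */

int main(void)
{
    uint32_t ring[RING_SIZE];
    uint32_t ptr_mask = RING_SIZE - 1;
    uint32_t rptr = 0, wptr = 0;

    for (uint32_t i = 0; i < 5; i++)  /* producer: host writes commands */
        ring[wptr++ & ptr_mask] = 0x80000000 | i;

    while (rptr != wptr)              /* consumer: the "CP" fetches them */
        printf("fetch 0x%08x\n", ring[rptr++ & ptr_mask]);
    return 0;
}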
+
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -2136,13 +2509,13 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+ for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
WREG32(CP_ME_RAM_DATA,
be32_to_cpup(fw_data++));
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < PFP_UCODE_SIZE; i++)
+ for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA,
be32_to_cpup(fw_data++));
@@ -2196,8 +2569,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2232,8 +2605,6 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- ring->rptr = RREG32(CP_RB_RPTR);
-
r600_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -2241,6 +2612,10 @@ int r600_cp_resume(struct radeon_device *rdev)
ring->ready = false;
return r;
}
+
+ if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
return 0;
}
@@ -2250,7 +2625,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
int r;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
@@ -2273,129 +2648,6 @@ void r600_cp_fini(struct radeon_device *rdev)
}
/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- */
-/**
- * r600_dma_stop - stop the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine (r6xx-evergreen).
- */
-void r600_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl = RREG32(DMA_RB_CNTL);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
-}
-
-/**
- * r600_dma_resume - setup and start the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- u32 rb_cntl, dma_cntl;
- u32 rb_bufsz;
- int r;
-
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR, 0);
- WREG32(DMA_RB_WPTR, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI,
- upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO,
- ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
-
- dma_cntl = RREG32(DMA_CNTL);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL, dma_cntl);
-
- if (rdev->family >= CHIP_RV770)
- WREG32(DMA_MODE, 1);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
- WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * r600_dma_fini - tear down the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine and free the ring (r6xx-evergreen).
- */
-void r600_dma_fini(struct radeon_device *rdev)
-{
- r600_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
-}
-
-/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2450,60 +2702,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
-/**
- * r600_dma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by writing using it to write an
- * value to memory. (r6xx-SI).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
/*
* CP fences/semaphores
*/
@@ -2512,30 +2710,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
+
+ if (rdev->family >= CHIP_RV770)
+ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2555,7 +2754,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -2567,84 +2766,14 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
- radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
-}
-/*
- * DMA fences/semaphores
- */
-
-/**
- * r600_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (r6xx-r7xx).
- */
-void r600_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, lower_32_bits(fence->seq));
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ return true;
}
/**
- * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (r6xx-SI).
- */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 s = emit_wait ? 0 : 1;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
-}
-
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- struct radeon_sa_bo *vb = NULL;
- int r;
-
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
- if (r) {
- return r;
- }
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb, sem);
- return 0;
-}
-
-/**
- * r600_copy_dma - copy pages using the DMA engine
+ * r600_copy_cpdma - copy pages using the CP DMA engine
*
* @rdev: radeon_device pointer
* @src_offset: src GPU address
@@ -2652,19 +2781,19 @@ int r600_copy_blit(struct radeon_device *rdev,
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
- * Copy GPU paging using the DMA engine (r6xx).
+ * Copy GPU paging using the CP DMA engine (r6xx+).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
+int r600_copy_cpdma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
+ int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
+ u32 size_in_bytes, cur_size_in_bytes, tmp;
int i, num_loops;
int r = 0;
@@ -2674,40 +2803,46 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
- r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
+ radeon_semaphore_sync_to(sem, *fence);
+ radeon_semaphore_sync_rings(rdev, sem, ring->idx);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit);
for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFE)
- cur_size_in_dw = 0xFFFE;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
- (upper_32_bits(src_offset) & 0xff)));
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ tmp = upper_32_bits(src_offset) & 0xff;
+ if (size_in_bytes == 0)
+ tmp |= PACKET3_CP_DMA_CP_SYNC;
+ radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ radeon_ring_write(ring, lower_32_bits(src_offset));
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, lower_32_bits(dst_offset));
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, cur_size_in_bytes);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
}
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
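Each CP_DMA packet above moves at most 0x1fffff bytes, so the ring reservation is num_loops * 6 + 24 dwords: 6 per copy packet plus the surrounding sync and wait writes. A standalone check of that arithmetic, assuming the 4 KiB GPU page size these parts use:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned num_gpu_pages = 2048;        /* an 8 MiB copy, for example */
    unsigned size = num_gpu_pages << 12;  /* assumes 4 KiB GPU pages */
    unsigned loops = DIV_ROUND_UP(size, 0x1fffff);

    /* ring space reserved: 6 dwords per packet + 24 for sync/waits */
    printf("%u loops, %u ring dwords\n", loops, loops * 6 + 24);
    return 0;
}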
@@ -2738,19 +2873,13 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
+ /* scratch needs to be initialized before MC */
r = r600_vram_scratch_init(rdev);
if (r)
return r;
r600_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -2759,12 +2888,6 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -2777,13 +2900,13 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
- return r;
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
}
- /* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2794,15 +2917,7 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
- if (r)
- return r;
-
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -2813,10 +2928,6 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = r600_dma_resume(rdev);
- if (r)
- return r;
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2857,6 +2968,9 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
+
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
@@ -2870,9 +2984,9 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ radeon_pm_suspend(rdev);
r600_audio_fini(rdev);
r600_cp_stop(rdev);
- r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -2938,16 +3052,20 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2960,7 +3078,6 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
- r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -2974,10 +3091,9 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r600_cp_fini(rdev);
- r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3080,104 +3196,6 @@ free_scratch:
return r;
}
-/**
- * r600_dma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (r6xx-SI).
- * Returns 0 on success, error on failure.
- */
-int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
- ib.ptr[3] = 0xDEADBEEF;
- ib.length_dw = 4;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-/**
- * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (r6xx-r7xx).
- */
-void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
/*
* Interrupts
*
@@ -3194,7 +3212,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
u32 rb_bufsz;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 4);
+ rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
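drm_order() is replaced by the kernel's order_base_2() throughout this patch; both return ceil(log2(n)). A quick standalone check of the IH sizing math above, with order_base_2() re-implemented here only for illustration:

#include <stdio.h>

/* minimal stand-in for the kernel's order_base_2(): ceil(log2(n)) */
static unsigned order_base_2(unsigned n)
{
    unsigned o = 0;

    while ((1u << o) < n)
        o++;
    return o;
}

int main(void)
{
    unsigned ring_size = 64 * 1024; /* bytes requested by r600_init() */
    unsigned rb_bufsz = order_base_2(ring_size / 4);

    ring_size = (1u << rb_bufsz) * 4; /* rounded up to a power of two */
    printf("rb_bufsz=%u size=%u ptr_mask=0x%x\n",
           rb_bufsz, ring_size, ring_size - 1);
    return 0;
}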
@@ -3274,7 +3292,7 @@ static void r600_rlc_start(struct radeon_device *rdev)
WREG32(RLC_CNTL, RLC_ENABLE);
}
-static int r600_rlc_init(struct radeon_device *rdev)
+static int r600_rlc_resume(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;
@@ -3286,45 +3304,22 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_HB_CNTL, 0);
- if (rdev->family == CHIP_ARUBA) {
- WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
- }
- if (rdev->family <= CHIP_CAYMAN) {
- WREG32(RLC_HB_BASE, 0);
- WREG32(RLC_HB_RPTR, 0);
- WREG32(RLC_HB_WPTR, 0);
- }
- if (rdev->family <= CHIP_CAICOS) {
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
- }
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_ARUBA) {
- for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
- } else if (rdev->family >= CHIP_CAYMAN) {
- for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
- } else if (rdev->family >= CHIP_CEDAR) {
- for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
- } else if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else {
- for (i = 0; i < RLC_UCODE_SIZE; i++) {
+ for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
@@ -3432,7 +3427,10 @@ int r600_irq_init(struct radeon_device *rdev)
r600_disable_interrupts(rdev);
/* init rlc */
- ret = r600_rlc_init(rdev);
+ if (rdev->family >= CHIP_CEDAR)
+ ret = evergreen_rlc_resume(rdev);
+ else
+ ret = r600_rlc_resume(rdev);
if (ret) {
r600_ih_ring_fini(rdev);
return ret;
@@ -3451,7 +3449,7 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -3511,8 +3509,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
- u32 d1grph = 0, d2grph = 0;
u32 dma_cntl;
+ u32 thermal_int = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3547,8 +3545,21 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
+
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
+ thermal_int = RREG32(CG_THERMAL_INT) &
+ ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ } else if (rdev->family >= CHIP_RV770) {
+ thermal_int = RREG32(RV770_CG_THERMAL_INT) &
+ ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ }
+ if (rdev->irq.dpm_thermal) {
+ DRM_DEBUG("dpm thermal\n");
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ }
+
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
@@ -3606,8 +3617,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
- WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
- WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3630,6 +3641,11 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
+ if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
+ WREG32(CG_THERMAL_INT, thermal_int);
+ } else if (rdev->family >= CHIP_RV770) {
+ WREG32(RV770_CG_THERMAL_INT, thermal_int);
+ }
return 0;
}
@@ -3779,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
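The new "wptr &= ~RB_OVERFLOW" line matters because on overflow the hardware reports the flag inside the WPTR value itself; without stripping it, the flag bit would leak into the returned ring offset. A sketch with an assumed bit position (the real RB_OVERFLOW definition is in r600d.h):

#include <stdio.h>

#define RB_OVERFLOW (1u << 0) /* assumed bit position, illustration only */

int main(void)
{
    unsigned ptr_mask = 0xffff;
    unsigned wptr = 0x1234 | RB_OVERFLOW; /* hw flagged an overflow */

    /* ... the driver acks the overflow via IH_RB_CNTL here ... */
    wptr &= ~RB_OVERFLOW;                 /* strip flag before masking */
    printf("wptr offset 0x%x\n", wptr & ptr_mask);
    return 0;
}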
@@ -3821,6 +3838,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
+ bool queue_thermal = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3862,7 +3880,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_flip(rdev, 0);
+ radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -3888,7 +3906,7 @@ restart_ih:
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_flip(rdev, 1);
+ radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
@@ -3904,6 +3922,14 @@ restart_ih:
break;
}
break;
+ case 9: /* D1 pflip */
+ DRM_DEBUG("IH: D1 flip\n");
+ radeon_crtc_handle_flip(rdev, 0);
+ break;
+ case 11: /* D2 pflip */
+ DRM_DEBUG("IH: D2 flip\n");
+ radeon_crtc_handle_flip(rdev, 1);
+ break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
@@ -3974,6 +4000,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3988,6 +4018,16 @@ restart_ih:
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ rdev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ rdev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
@@ -4004,6 +4044,8 @@ restart_ih:
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
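+	/* thermal events are only handled once dpm is initialized */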
+ if (queue_thermal && rdev->pm.dpm_enabled)
+ schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
@@ -4076,7 +4118,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
- u32 link_width_cntl, mask, target_reg;
+ u32 link_width_cntl, mask;
if (rdev->flags & RADEON_IS_IGP)
return;
@@ -4088,7 +4130,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
if (ASIC_IS_X2(rdev))
return;
- /* FIXME wait for idle */
+ radeon_gui_idle(rdev);
switch (lanes) {
case 0:
@@ -4107,53 +4149,24 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
break;
case 12:
+ /* not actually supported */
mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
break;
case 16:
- default:
mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
break;
- }
-
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-
- if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
- (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
- return;
-
- if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ default:
+ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
return;
+ }
- link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
- RADEON_PCIE_LC_RECONFIG_NOW |
- R600_PCIE_LC_RENEGOTIATE_EN |
- R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
- link_width_cntl |= mask;
-
- WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
- /* some northbridges can renegotiate the link rather than requiring
- * a complete re-config.
- * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
- */
- if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
- link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
- else
- link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
-
- WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
- RADEON_PCIE_LC_RECONFIG_NOW));
-
- if (rdev->family >= CHIP_RV770)
- target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
- else
- target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
-
- /* wait for lane set to complete */
- link_width_cntl = RREG32(target_reg);
- while (link_width_cntl == 0xffffffff)
- link_width_cntl = RREG32(target_reg);
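+	/* program the requested width and force an immediate link reconfiguration */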
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+ link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4170,13 +4183,11 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return 0;
- /* FIXME wait for idle */
+ radeon_gui_idle(rdev);
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
- case RADEON_PCIE_LC_LINK_WIDTH_X0:
- return 0;
case RADEON_PCIE_LC_LINK_WIDTH_X1:
return 1;
case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4185,6 +4196,10 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
return 4;
case RADEON_PCIE_LC_LINK_WIDTH_X8:
return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
case RADEON_PCIE_LC_LINK_WIDTH_X16:
default:
return 16;
@@ -4195,8 +4210,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
- u32 mask;
- int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -4215,14 +4228,11 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (rdev->family <= CHIP_R600)
return;
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & DRM_PCIE_SPEED_50))
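+	/* nothing to do unless the bus supports at least 5.0 GT/s */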
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
return;
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -4235,23 +4245,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -4272,7 +4282,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
tmp = RREG32(0x541c);
WREG32(0x541c, tmp | 0x8);
@@ -4286,39 +4296,39 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
- training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+ training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
training_cntl &= ~LC_POINT_7_PLUS_EN;
- WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
} else {
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
/**
- * r600_get_gpu_clock - return GPU clock counter snapshot
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;