Diffstat (limited to 'drivers/gpu/drm/radeon')
98 files changed, 6283 insertions, 3088 deletions
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 306364a1ecd..dbcbfe80aac 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \ - radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ + radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \ evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ @@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ - ci_dpm.o dce6_afmt.o + ci_dpm.o dce6_afmt.o radeon_vm.o # add async DMA block radeon-y += \ @@ -99,6 +99,12 @@ radeon-y += \ uvd_v3_1.o \ uvd_v4_2.o +# add VCE block +radeon-y += \ + radeon_vce.o \ + vce_v1_0.o \ + vce_v2_0.o \ + radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_ACPI) += radeon_acpi.o diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a9338c85630..30d242b2507 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: radeon_crtc->enabled = true; - /* adjust pm to dpms changes BEFORE enabling crtcs */ - radeon_pm_compute_clocks(rdev); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); @@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* adjust pm to dpms changes AFTER disabling crtcs */ - radeon_pm_compute_clocks(rdev); break; } + /* adjust pm to dpms */ + radeon_pm_compute_clocks(rdev); } static void @@ -559,7 +557,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, u32 adjusted_clock = mode->clock; int encoder_mode = atombios_get_encoder_mode(encoder); u32 dp_clock = mode->clock; - int bpc = radeon_get_monitor_bpc(connector); + u32 clock = mode->clock; + int bpc = radeon_crtc->bpc; bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); /* reset the pll flags */ @@ -634,6 +633,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; } + /* adjust pll for deep color modes */ + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + break; + case 10: + clock = (clock * 5) / 4; + break; + case 12: + clock = (clock * 3) / 2; + break; + case 16: + clock = clock * 2; + break; + } + } + /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock * accordingly based on the encoder/transmitter to work around * special hw requirements. 
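[Annotation] The deep-color block added above scales the PLL target by the extra bits per pixel, since HDMI deep color carries 10/12/16-bit channels over the same TMDS link. A standalone sketch of that scaling rule (the function name and sample clock are illustrative, not part of the patch):

```c
#include <stdio.h>

/* Mirror of the bpc switch added to atombios_adjust_pll(): deep color
 * raises the TMDS clock by 5/4 (10 bpc), 3/2 (12 bpc) or 2x (16 bpc);
 * 8 bpc (and anything unrecognized) is left alone. */
static unsigned int hdmi_deep_color_clock(unsigned int clock_khz, int bpc)
{
	switch (bpc) {
	case 10:
		return (clock_khz * 5) / 4;
	case 12:
		return (clock_khz * 3) / 2;
	case 16:
		return clock_khz * 2;
	case 8:
	default:
		return clock_khz;
	}
}

int main(void)
{
	/* 1080p60 (148500 kHz): 185625 kHz at 10 bpc, 222750 kHz at 12 bpc */
	printf("%u %u %u\n",
	       hdmi_deep_color_clock(148500, 8),
	       hdmi_deep_color_clock(148500, 10),
	       hdmi_deep_color_clock(148500, 12));
	return 0;
}
```

The AdjustDisplayPll hunks below then feed this scaled value (clock / 10) to the atom table instead of mode->clock, and the set_pll hunk further down reuses radeon_crtc->adjusted_clock on DCE5+ for the same reason.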
@@ -655,7 +672,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, switch (crev) { case 1: case 2: - args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v1.usPixelClock = cpu_to_le16(clock / 10); args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) @@ -667,7 +684,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; break; case 3: - args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10); args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; args.v3.sInput.ucEncodeMode = encoder_mode; args.v3.sInput.ucDispPllConfig = 0; @@ -681,10 +698,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (encoder_mode == ATOM_ENCODER_MODE_HDMI) - /* deep color support */ - args.v3.sInput.usPixelClock = - cpu_to_le16((mode->clock * bpc / 8) / 10); if (dig->coherent_mode) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; @@ -864,14 +877,21 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; - switch (bpc) { - case 8: - default: - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; - break; - case 10: - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; - break; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; + break; + case 10: + /* yes this is correct, the atom define is wrong */ + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP; + break; + case 12: + /* yes this is correct, the atom define is wrong */ + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; + break; + } } args.v5.ucTransmitterID = encoder_id; args.v5.ucEncoderMode = encoder_mode; @@ -886,20 +906,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, args.v6.ucMiscInfo = 0; /* HDMI depth, etc. 
*/ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; - switch (bpc) { - case 8: - default: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; - break; - case 10: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; - break; - case 12: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; - break; - case 16: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; - break; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; + break; + case 10: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6; + break; + case 12: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6; + break; + case 16: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; + break; + } } args.v6.ucTransmitterID = encoder_id; args.v6.ucEncoderMode = encoder_mode; @@ -940,6 +962,9 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; int dp_clock; + + /* Assign mode clock for hdmi deep color max clock limit check */ + radeon_connector->pixelclock_for_modeset = mode->clock; radeon_crtc->bpc = radeon_get_monitor_bpc(connector); switch (encoder_mode) { @@ -1021,10 +1046,17 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder); u32 pll_clock = mode->clock; + u32 clock = mode->clock; u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; struct radeon_pll *pll; int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder); + /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */ + if (ASIC_IS_DCE5(rdev) && + (encoder_mode == ATOM_ENCODER_MODE_HDMI) && + (radeon_crtc->bpc > 8)) + clock = radeon_crtc->adjusted_clock; + switch (radeon_crtc->pll_id) { case ATOM_PPLL1: pll = &rdev->clock.p1pll; @@ -1059,7 +1091,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode radeon_crtc->crtc_id, &radeon_crtc->ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, - encoder_mode, radeon_encoder->encoder_id, mode->clock, + encoder_mode, radeon_encoder->encoder_id, clock, ref_div, fb_div, frac_fb_div, post_div, radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss); @@ -1104,9 +1136,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); u32 tmp, viewport_w, viewport_h; int r; + bool bypass_lut = false; /* no fb bound */ - if (!atomic && !crtc->fb) { + if (!atomic && !crtc->primary->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } @@ -1116,8 +1149,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, target_fb = fb; } else { - radeon_fb = to_radeon_framebuffer(crtc->fb); - target_fb = crtc->fb; + radeon_fb = to_radeon_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; } /* If atomic, assume fb object is pinned & idle & fenced and @@ -1142,33 +1175,73 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (target_fb->bits_per_pixel) { - case 8: + switch (target_fb->pixel_format) { + case DRM_FORMAT_C8: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); break; - case 15: + case DRM_FORMAT_XRGB4444: + case 
DRM_FORMAT_ARGB4444: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_BGRA5551: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif break; - case 16: + case DRM_FORMAT_RGB565: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); #endif break; - case 24: - case 32: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); #ifdef __BIG_ENDIAN fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); #endif break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; default: - DRM_ERROR("Unsupported screen depth %d\n", - target_fb->bits_per_pixel); + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(target_fb->pixel_format)); return -EINVAL; } @@ -1176,31 +1249,48 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); /* Set NUM_BANKS. */ - if (rdev->family >= CHIP_BONAIRE) { - unsigned tileb, index, num_banks, tile_split_bytes; + if (rdev->family >= CHIP_TAHITI) { + unsigned index, num_banks; - /* Calculate the macrotile mode index. */ - tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; - tileb = min(tile_split_bytes, tileb); + if (rdev->family >= CHIP_BONAIRE) { + unsigned tileb, tile_split_bytes; - for (index = 0; tileb > 64; index++) { - tileb >>= 1; - } + /* Calculate the macrotile mode index. 
*/ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); + + for (index = 0; tileb > 64; index++) + tileb >>= 1; - if (index >= 16) { - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); - return -EINVAL; + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } + + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + } else { + switch (target_fb->bits_per_pixel) { + case 8: + index = 10; + break; + case 16: + index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; + break; + default: + case 32: + index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; + break; + } + + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; } - num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { - /* SI and older. */ - if (rdev->family >= CHIP_TAHITI) - tmp = rdev->config.si.tile_config; - else if (rdev->family >= CHIP_CAYMAN) + /* NI and older. */ + if (rdev->family >= CHIP_CAYMAN) tmp = rdev->config.cayman.tile_config; else tmp = rdev->config.evergreen.tile_config; @@ -1280,6 +1370,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); + /* + * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT + * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to + * retain the full precision throughout the pipeline. + */ + WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset, + (bypass_lut ? 
EVERGREEN_LUT_10BIT_BYPASS_EN : 0), + ~EVERGREEN_LUT_10BIT_BYPASS_EN); + + if (bypass_lut) + DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); + WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); @@ -1312,10 +1414,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); - /* set pageflip to happen anywhere in vblank interval */ - WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); - if (!atomic && fb && fb != crtc->fb) { + if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); @@ -1347,9 +1449,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; u32 tmp, viewport_w, viewport_h; int r; + bool bypass_lut = false; /* no fb bound */ - if (!atomic && !crtc->fb) { + if (!atomic && !crtc->primary->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } @@ -1359,8 +1462,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, target_fb = fb; } else { - radeon_fb = to_radeon_framebuffer(crtc->fb); - target_fb = crtc->fb; + radeon_fb = to_radeon_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; } obj = radeon_fb->obj; @@ -1384,18 +1487,30 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (target_fb->bits_per_pixel) { - case 8: + switch (target_fb->pixel_format) { + case DRM_FORMAT_C8: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | AVIVO_D1GRPH_CONTROL_8BPP_INDEXED; break; - case 15: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + fb_format = + AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | + AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444; +#ifdef __BIG_ENDIAN + fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; +#endif + break; + case DRM_FORMAT_XRGB1555: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555; +#ifdef __BIG_ENDIAN + fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; +#endif break; - case 16: + case DRM_FORMAT_RGB565: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | AVIVO_D1GRPH_CONTROL_16BPP_RGB565; @@ -1403,8 +1518,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT; #endif break; - case 24: - case 32: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; @@ -1412,9 +1527,20 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; #endif break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + fb_format = + AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | + AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010; +#ifdef __BIG_ENDIAN + fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT; +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; default: - DRM_ERROR("Unsupported screen depth %d\n", - target_fb->bits_per_pixel); + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(target_fb->pixel_format)); return -EINVAL; } @@ -1453,6 +1579,13 @@ static int 
avivo_crtc_do_set_base(struct drm_crtc *crtc, if (rdev->family >= CHIP_R600) WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap); + /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */ + WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, + (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN); + + if (bypass_lut) + DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); + WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); @@ -1481,10 +1614,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); - /* set pageflip to happen anywhere in vblank interval */ - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); - if (!atomic && fb && fb != crtc->fb) { + if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); @@ -1719,8 +1852,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) } /* otherwise, pick one of the plls */ if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI)) { - /* KB/KV has PPLL1 and PPLL2 */ + (rdev->family == CHIP_KABINI) || + (rdev->family == CHIP_MULLINS)) { + /* KB/KV/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1773,6 +1907,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) return ATOM_PPLL1; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; + } else if (ASIC_IS_DCE41(rdev)) { + /* Don't share PLLs on DCE4.1 chips */ + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { + if (rdev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + } + pll_in_use = radeon_get_pll_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; } else if (ASIC_IS_DCE4(rdev)) { /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, * depending on the asic: @@ -1800,7 +1948,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) if (pll != ATOM_PPLL_INVALID) return pll; } - } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */ + } else { /* use the same PPLL for all monitors with the same clock */ pll = radeon_get_shared_nondp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) @@ -1870,6 +2018,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) is_tvcv = true; + if (!radeon_crtc->adjusted_clock) + return -EINVAL; + atombios_crtc_set_pll(crtc, adjusted_mode); if (ASIC_IS_DCE4(rdev)) @@ -1957,12 +2108,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - if (crtc->fb) { + if (crtc->primary->fb) { int r; struct radeon_framebuffer *radeon_fb; struct radeon_bo *rbo; - radeon_fb = to_radeon_framebuffer(crtc->fb); + radeon_fb = to_radeon_framebuffer(crtc->primary->fb); rbo = 
gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r)) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 4ad7643fce5..b1e11f8434e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -95,9 +95,12 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; int recv_bytes; + int r = 0; memset(&args, 0, sizeof(args)); + mutex_lock(&chan->mutex); + base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); radeon_atom_copy_swap(base, send, send_bytes, true); @@ -117,19 +120,22 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, /* timeout */ if (args.v1.ucReplyStatus == 1) { DRM_DEBUG_KMS("dp_aux_ch timeout\n"); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto done; } /* flags not zero */ if (args.v1.ucReplyStatus == 2) { DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); - return -EBUSY; + r = -EIO; + goto done; } /* error */ if (args.v1.ucReplyStatus == 3) { DRM_DEBUG_KMS("dp_aux_ch error\n"); - return -EIO; + r = -EIO; + goto done; } recv_bytes = args.v1.ucDataOutLen; @@ -139,189 +145,89 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, if (recv && recv_size) radeon_atom_copy_swap(recv, base + 16, recv_bytes, false); - return recv_bytes; -} - -static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, - u16 address, u8 *send, u8 send_bytes, u8 delay) -{ - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; - int ret; - u8 msg[20]; - int msg_bytes = send_bytes + 4; - u8 ack; - unsigned retry; - - if (send_bytes > 16) - return -1; - - msg[0] = address; - msg[1] = address >> 8; - msg[2] = DP_AUX_NATIVE_WRITE << 4; - msg[3] = (msg_bytes << 4) | (send_bytes - 1); - memcpy(&msg[4], send, send_bytes); - - for (retry = 0; retry < 7; retry++) { - ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, - msg, msg_bytes, NULL, 0, delay, &ack); - if (ret == -EBUSY) - continue; - else if (ret < 0) - return ret; - ack >>= 4; - if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) - return send_bytes; - else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - usleep_range(400, 500); - else - return -EIO; - } + r = recv_bytes; +done: + mutex_unlock(&chan->mutex); - return -EIO; + return r; } -static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, - u16 address, u8 *recv, int recv_bytes, u8 delay) +#define BARE_ADDRESS_SIZE 3 +#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) + +static ssize_t +radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; - u8 msg[4]; - int msg_bytes = 4; - u8 ack; + struct radeon_i2c_chan *chan = + container_of(aux, struct radeon_i2c_chan, aux); int ret; - unsigned retry; - - msg[0] = address; - msg[1] = address >> 8; - msg[2] = DP_AUX_NATIVE_READ << 4; - msg[3] = (msg_bytes << 4) | (recv_bytes - 1); - - for (retry = 0; retry < 7; retry++) { - ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, - msg, msg_bytes, recv, recv_bytes, delay, &ack); - if (ret == -EBUSY) - continue; - else if (ret < 0) - return ret; - ack >>= 4; - if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) - return ret; - else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - usleep_range(400, 500); - else if (ret == 0) - return 
-EPROTO; + u8 tx_buf[20]; + size_t tx_size; + u8 ack, delay = 0; + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + tx_buf[0] = msg->address & 0xff; + tx_buf[1] = msg->address >> 8; + tx_buf[2] = msg->request << 4; + tx_buf[3] = msg->size ? (msg->size - 1) : 0; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + /* tx_size needs to be 4 even for bare address packets since the atom + * table needs the info in tx_buf[3]. + */ + tx_size = HEADER_SIZE + msg->size; + if (msg->size == 0) + tx_buf[3] |= BARE_ADDRESS_SIZE << 4; + else + tx_buf[3] |= tx_size << 4; + memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size); + ret = radeon_process_aux_ch(chan, + tx_buf, tx_size, NULL, 0, delay, &ack); + if (ret >= 0) + /* Return payload size. */ + ret = msg->size; + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + /* tx_size needs to be 4 even for bare address packets since the atom + * table needs the info in tx_buf[3]. + */ + tx_size = HEADER_SIZE; + if (msg->size == 0) + tx_buf[3] |= BARE_ADDRESS_SIZE << 4; else - return -EIO; + tx_buf[3] |= tx_size << 4; + ret = radeon_process_aux_ch(chan, + tx_buf, tx_size, msg->buffer, msg->size, delay, &ack); + break; + default: + ret = -EINVAL; + break; } - return -EIO; -} + if (ret >= 0) + msg->reply = ack >> 4; -static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, - u16 reg, u8 val) -{ - radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0); + return ret; } -static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector, - u16 reg) +void radeon_dp_aux_init(struct radeon_connector *radeon_connector) { - u8 val = 0; - - radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0); - - return val; -} - -int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, - u8 write_byte, u8 *read_byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; - u16 address = algo_data->address; - u8 msg[5]; - u8 reply[2]; - unsigned retry; - int msg_bytes; - int reply_bytes = 1; int ret; - u8 ack; - - /* Set up the command byte */ - if (mode & MODE_I2C_READ) - msg[2] = DP_AUX_I2C_READ << 4; - else - msg[2] = DP_AUX_I2C_WRITE << 4; - if (!(mode & MODE_I2C_STOP)) - msg[2] |= DP_AUX_I2C_MOT << 4; + radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; + radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; + radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer; - msg[0] = address; - msg[1] = address >> 8; + ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); + if (!ret) + radeon_connector->ddc_bus->has_aux = true; - switch (mode) { - case MODE_I2C_WRITE: - msg_bytes = 5; - msg[3] = msg_bytes << 4; - msg[4] = write_byte; - break; - case MODE_I2C_READ: - msg_bytes = 4; - msg[3] = msg_bytes << 4; - break; - default: - msg_bytes = 4; - msg[3] = 3 << 4; - break; - } - - for (retry = 0; retry < 7; retry++) { - ret = radeon_process_aux_ch(auxch, - msg, msg_bytes, reply, reply_bytes, 0, &ack); - if (ret == -EBUSY) - continue; - else if (ret < 0) { - DRM_DEBUG_KMS("aux_ch failed %d\n", ret); - return ret; - } - - switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) { - case DP_AUX_NATIVE_REPLY_ACK: - /* I2C-over-AUX Reply field is only valid - * when paired with AUX ACK. 
- */ - break; - case DP_AUX_NATIVE_REPLY_NACK: - DRM_DEBUG_KMS("aux_ch native nack\n"); - return -EREMOTEIO; - case DP_AUX_NATIVE_REPLY_DEFER: - DRM_DEBUG_KMS("aux_ch native defer\n"); - usleep_range(500, 600); - continue; - default: - DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack); - return -EREMOTEIO; - } - - switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) { - case DP_AUX_I2C_REPLY_ACK: - if (mode == MODE_I2C_READ) - *read_byte = reply[0]; - return ret; - case DP_AUX_I2C_REPLY_NACK: - DRM_DEBUG_KMS("aux_i2c nack\n"); - return -EREMOTEIO; - case DP_AUX_I2C_REPLY_DEFER: - DRM_DEBUG_KMS("aux_i2c defer\n"); - usleep_range(400, 500); - break; - default: - DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack); - return -EREMOTEIO; - } - } - - DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); - return -EREMOTEIO; + WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret); } /***** general DP utility functions *****/ @@ -386,6 +292,19 @@ static int dp_get_max_dp_pix_clock(int link_rate, /***** radeon specific DP functions *****/ +static int radeon_dp_get_max_link_rate(struct drm_connector *connector, + u8 dpcd[DP_DPCD_SIZE]) +{ + int max_link_rate; + + if (radeon_connector_is_dp12_capable(connector)) + max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000); + else + max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000); + + return max_link_rate; +} + /* First get the min lane# when low rate is used according to pixel clock * (prefer low rate), second check max lane# supported by DP panel, * if the max lane# < low rate lane# then use max lane# instead. @@ -395,7 +314,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int max_link_rate = drm_dp_max_link_rate(dpcd); + int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd); int max_lane_num = drm_dp_max_lane_count(dpcd); int lane_num; int max_dp_pix_clock; @@ -433,7 +352,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, return 540000; } - return drm_dp_max_link_rate(dpcd); + return radeon_dp_get_max_link_rate(connector, dpcd); } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, @@ -456,12 +375,11 @@ static u8 radeon_dp_encoder_service(struct radeon_device *rdev, u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) { - struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, - dig_connector->dp_i2c_bus->rec.i2c_id, 0); + radeon_connector->ddc_bus->rec.i2c_id, 0); } static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) @@ -472,11 +390,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) return; - if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0)) + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); - if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0)) + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); } @@ -485,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct 
radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; + + char dpcd_hex_dump[DP_DPCD_SIZE * 3]; - ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, - DP_DPCD_SIZE, 0); + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, + DP_DPCD_SIZE); if (ret > 0) { memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: "); - for (i = 0; i < DP_DPCD_SIZE; i++) - DRM_DEBUG_KMS("%02x ", msg[i]); - DRM_DEBUG_KMS("\n"); + + hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd), + 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); + DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); radeon_dp_probe_oui(radeon_connector); @@ -510,6 +430,7 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector; int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); u8 tmp; @@ -517,21 +438,30 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, if (!ASIC_IS_DCE4(rdev)) return panel_mode; + if (!radeon_connector->con_priv) + return panel_mode; + + dig_connector = radeon_connector->con_priv; + if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ - tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); - if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; - else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || - (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; - else - panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; + else + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + } } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { /* eDP */ - tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); - if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + } } return panel_mode; @@ -577,37 +507,43 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, return MODE_OK; } -static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, - u8 link_status[DP_LINK_STATUS_SIZE]) -{ - int ret; - ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, - link_status, DP_LINK_STATUS_SIZE, 100); - if (ret <= 0) { - return false; - } - - DRM_DEBUG_KMS("link status %6ph\n", link_status); - return true; -} - bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) { u8 link_status[DP_LINK_STATUS_SIZE]; struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - if (!radeon_dp_get_link_status(radeon_connector, link_status)) + if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status) + <= 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; return true; } +void radeon_dp_set_rx_power_state(struct drm_connector *connector, + u8 power_state) +{ 
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector; + + if (!radeon_connector->con_priv) + return; + + dig_connector = radeon_connector->con_priv; + + /* power up/down the sink */ + if (dig_connector->dpcd[0] >= 0x11) { + drm_dp_dpcd_writeb(&radeon_connector->ddc_bus->aux, + DP_SET_POWER, power_state); + usleep_range(1000, 2000); + } +} + + struct radeon_dp_link_train_info { struct radeon_device *rdev; struct drm_encoder *encoder; struct drm_connector *connector; - struct radeon_connector *radeon_connector; int enc_id; int dp_clock; int dp_lane_count; @@ -617,6 +553,7 @@ struct radeon_dp_link_train_info { u8 link_status[DP_LINK_STATUS_SIZE]; u8 tries; bool use_dpencoder; + struct drm_dp_aux *aux; }; static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) @@ -627,8 +564,8 @@ static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info) 0, dp_info->train_set[0]); /* sets all lanes at once */ /* set the vs/emph on the sink */ - radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET, - dp_info->train_set, dp_info->dp_lane_count, 0); + drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET, + dp_info->train_set, dp_info->dp_lane_count); } static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) @@ -663,7 +600,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) } /* enable training pattern on the sink */ - radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp); + drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp); } static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) @@ -673,34 +610,30 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) u8 tmp; /* power up the sink */ - if (dp_info->dpcd[0] >= 0x11) { - radeon_write_dpcd_reg(dp_info->radeon_connector, - DP_SET_POWER, DP_SET_POWER_D0); - usleep_range(1000, 2000); - } + radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0); /* possibly enable downspread on the sink */ if (dp_info->dpcd[3] & 0x1) - radeon_write_dpcd_reg(dp_info->radeon_connector, - DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); + drm_dp_dpcd_writeb(dp_info->aux, + DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); else - radeon_write_dpcd_reg(dp_info->radeon_connector, - DP_DOWNSPREAD_CTRL, 0); + drm_dp_dpcd_writeb(dp_info->aux, + DP_DOWNSPREAD_CTRL, 0); if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { - radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1); + drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1); } /* set the lane count on the sink */ tmp = dp_info->dp_lane_count; if (drm_dp_enhanced_frame_cap(dp_info->dpcd)) tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); + drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp); /* set the link rate on the sink */ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); - radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); + drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp); /* start training on the source */ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) @@ -711,9 +644,9 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) dp_info->dp_clock, dp_info->enc_id, 0); /* disable the training pattern on the sink */ - 
radeon_write_dpcd_reg(dp_info->radeon_connector, - DP_TRAINING_PATTERN_SET, - DP_TRAINING_PATTERN_DISABLE); + drm_dp_dpcd_writeb(dp_info->aux, + DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); return 0; } @@ -723,9 +656,9 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info udelay(400); /* disable the training pattern on the sink */ - radeon_write_dpcd_reg(dp_info->radeon_connector, - DP_TRAINING_PATTERN_SET, - DP_TRAINING_PATTERN_DISABLE); + drm_dp_dpcd_writeb(dp_info->aux, + DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); /* disable the training pattern on the source */ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) @@ -757,7 +690,8 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) while (1) { drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { + if (drm_dp_dpcd_read_link_status(dp_info->aux, + dp_info->link_status) <= 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -819,7 +753,8 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) while (1) { drm_dp_link_train_channel_eq_delay(dp_info->dpcd); - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { + if (drm_dp_dpcd_read_link_status(dp_info->aux, + dp_info->link_status) <= 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -902,19 +837,23 @@ void radeon_dp_link_train(struct drm_encoder *encoder, else dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; - tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); - if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) - dp_info.tp3_supported = true; - else + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) + == 1) { + if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) + dp_info.tp3_supported = true; + else + dp_info.tp3_supported = false; + } else { dp_info.tp3_supported = false; + } memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); dp_info.rdev = rdev; dp_info.encoder = encoder; dp_info.connector = connector; - dp_info.radeon_connector = radeon_connector; dp_info.dp_lane_count = dig_connector->dp_lane_count; dp_info.dp_clock = dig_connector->dp_clock; + dp_info.aux = &radeon_connector->ddc_bus->aux; if (radeon_dp_link_train_init(&dp_info)) goto done; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a42d61571f4..7d68203a373 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, struct backlight_properties props; struct radeon_backlight_privdata *pdata; struct radeon_encoder_atom_dig *dig; - u8 backlight_level; char bl_name[16]; /* Mac laptops with multiple GPUs use the gmux driver for backlight @@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, pdata->encoder = radeon_encoder; - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); - dig = radeon_encoder->enc_priv; dig->bl_dev = bd; bd->props.brightness = radeon_atom_backlight_get_brightness(bd); + /* Set a reasonable default here if the level is 0 otherwise + * fbdev will attempt to turn the backlight on after console + * unblanking and it will try and restore 0 which turns the backlight + * off again. 
+ */ + if (bd->props.brightness == 0) + bd->props.brightness = RADEON_MAX_BL_LEVEL; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); @@ -464,11 +468,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int bpc = 8; - if (connector) - bpc = radeon_get_monitor_bpc(connector); + if (encoder->crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + bpc = radeon_crtc->bpc; + } switch (bpc) { case 0: @@ -1313,7 +1318,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } if (is_dp) args.v5.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v5.ucLaneNum = 8; else args.v5.ucLaneNum = 4; @@ -1632,10 +1637,16 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; + bool travis_quirk = false; if (connector) { radeon_connector = to_radeon_connector(connector); radeon_dig_connector = radeon_connector->con_priv; + if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == + ENCODER_OBJECT_ID_TRAVIS) && + (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) && + !ASIC_IS_DCE5(rdev)) + travis_quirk = true; } switch (mode) { @@ -1656,17 +1667,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); } - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); } else if (ASIC_IS_DCE4(rdev)) { /* setup and enable the encoder */ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); } else { /* setup and enable the encoder and transmitter */ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { @@ -1674,68 +1681,56 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) ATOM_TRANSMITTER_ACTION_POWER_ON); radeon_dig_connector->edp_on = true; } + } + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { + /* DP_SET_POWER_D0 is set in radeon_dp_link_train */ radeon_dp_link_train(encoder, connector); if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + if (ext_encoder) + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (ASIC_IS_DCE4(rdev)) { + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) + 
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); + } + if (ext_encoder) + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); + + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && + connector && !travis_quirk) + radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3); + if (ASIC_IS_DCE4(rdev)) { /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); } else { /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_transmitter_setup(encoder, + ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { - if (ASIC_IS_DCE4(rdev)) - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); + if (travis_quirk) + radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3); if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); radeon_dig_connector->edp_on = false; } } - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); - break; - } -} - -static void -radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder, - struct drm_encoder *ext_encoder, - int mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - - switch (mode) { - case DRM_MODE_DPMS_ON: - default: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) { - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); - } else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) { - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); - } else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); break; } } @@ -1746,7 +1741,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", radeon_encoder->encoder_id, mode, radeon_encoder->devices, @@ -1806,9 +1800,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) return; } - if (ext_encoder) - radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode); - radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); } @@ -1897,8 +1888,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; else args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); - } else + } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; + } else { args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index b5162c3b611..9c570fb15b8 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -43,15 +43,19 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); unsigned char *base; u16 out = cpu_to_le16(0); + int r = 0; memset(&args, 0, sizeof(args)); + mutex_lock(&chan->mutex); + base = (unsigned char *)rdev->mode_info.atom_context->scratch; if (flags & HW_I2C_WRITE) { if (num > ATOM_MAX_HW_I2C_WRITE) { DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num); - return -EINVAL; + r = -EINVAL; + goto done; } if (buf == NULL) args.ucRegIndex = 0; @@ -65,7 +69,8 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, } else { if (num > ATOM_MAX_HW_I2C_READ) { DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); - return -EINVAL; + r = -EINVAL; + goto done; } args.ucRegIndex = 0; args.lpI2CDataOut = 0; @@ -82,13 +87,17 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { DRM_DEBUG_KMS("hw_i2c error\n"); - return -EIO; + r = -EIO; + goto done; } if (!(flags & HW_I2C_WRITE)) radeon_atom_copy_swap(buf, base, num, false); - return 0; +done: + mutex_unlock(&chan->mutex); + + return r; } int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0fbd36f3d4e..f81d7ca134d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -29,6 +29,7 @@ #include "cypress_dpm.h" #include "btc_dpm.h" #include "atom.h" +#include <linux/seq_file.h> #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -2600,6 +2601,10 @@ int btc_dpm_init(struct radeon_device *rdev) pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = rv7xx_parse_power_table(rdev); if (ret) return ret; @@ -2756,6 +2761,37 @@ void btc_dpm_fini(struct radeon_device *rdev) r600_free_extended_power_table(rdev); } +void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + if (rdev->family >= CHIP_CEDAR) { + seq_printf(m, "power level %d sclk: %u mclk: %u 
vddc: %u vddci: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + } else { + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc); + } + } +} + u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h index 29e32de7e02..9c65be2d55a 100644 --- a/drivers/gpu/drm/radeon/btcd.h +++ b/drivers/gpu/drm/radeon/btcd.h @@ -44,6 +44,10 @@ # define DYN_SPREAD_SPECTRUM_EN (1 << 23) # define AC_DC_SW (1 << 24) +#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c +# define CURRENT_PROFILE_INDEX_MASK (0xf << 4) +# define CURRENT_PROFILE_INDEX_SHIFT 4 + #define CG_BIF_REQ_AND_RSP 0x7f4 #define CG_CLIENT_REQ(x) ((x) << 0) #define CG_CLIENT_REQ_MASK (0xff << 0) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 8d49104ca6c..584090ac3eb 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -21,8 +21,10 @@ * */ +#include <linux/firmware.h> #include "drmP.h" #include "radeon.h" +#include "radeon_ucode.h" #include "cikd.h" #include "r600_dpm.h" #include "ci_dpm.h" @@ -172,6 +174,8 @@ extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); extern int ci_mc_load_microcode(struct radeon_device *rdev); +extern void cik_update_cg(struct radeon_device *rdev, + u32 block, bool enable); static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, struct atom_voltage_table_entry *voltage_table, @@ -200,24 +204,29 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev) struct ci_power_info *pi = ci_get_pi(rdev); switch (rdev->pdev->device) { + case 0x6649: case 0x6650: + case 0x6651: case 0x6658: case 0x665C: + case 0x665D: default: pi->powertune_defaults = &defaults_bonaire_xt; break; - case 0x6651: - case 0x665D: - pi->powertune_defaults = &defaults_bonaire_pro; - break; case 0x6640: - pi->powertune_defaults = &defaults_saturn_xt; - break; case 0x6641: - pi->powertune_defaults = &defaults_saturn_pro; + case 0x6646: + case 0x6647: + pi->powertune_defaults = &defaults_saturn_xt; break; case 0x67B8: case 0x67B0: + pi->powertune_defaults = &defaults_hawaii_xt; + break; + case 0x67BA: + case 0x67B1: + pi->powertune_defaults = &defaults_hawaii_pro; + break; case 0x67A0: case 0x67A1: case 0x67A2: @@ -226,11 +235,7 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev) case 0x67AA: case 0x67B9: case 0x67BE: - pi->powertune_defaults = &defaults_hawaii_xt; - break; - case 0x67BA: - case 0x67B1: - pi->powertune_defaults = &defaults_hawaii_pro; + pi->powertune_defaults = &defaults_bonaire_xt; break; } @@ -746,6 +751,14 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; int i; + if (rps->vce_active) { + rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; + rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; + } else { + rps->evclk = 0; + rps->ecclk = 0; + } + if ((rdev->pm.dpm.new_active_crtc_count > 1) || ci_dpm_vblank_too_short(rdev)) disable_mclk_switching = true; @@ -804,6 +817,13 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, sclk = ps->performance_levels[0].sclk; } + if (rps->vce_active) { + if (sclk < 
rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) + sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; + if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk) + mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk; + } + ps->performance_levels[0].sclk = sclk; ps->performance_levels[0].mclk = mclk; @@ -1159,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev) tmp &= ~GLOBAL_PWRMGT_EN; WREG32_SMC(GENERAL_PWRMGT, tmp); - tmp = RREG32(SCLK_PWRMGT_CNTL); + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); tmp &= ~DYNAMIC_PM_EN; WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); @@ -3468,7 +3488,6 @@ static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) 0 : -EINVAL; } -#if 0 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) { struct ci_power_info *pi = ci_get_pi(rdev); @@ -3501,6 +3520,7 @@ static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) 0 : -EINVAL; } +#if 0 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) { struct ci_power_info *pi = ci_get_pi(rdev); @@ -3587,7 +3607,6 @@ static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate) return ci_enable_uvd_dpm(rdev, !gate); } -#if 0 static u8 ci_get_vce_boot_level(struct radeon_device *rdev) { u8 i; @@ -3608,15 +3627,15 @@ static int ci_update_vce_dpm(struct radeon_device *rdev, struct radeon_ps *radeon_current_state) { struct ci_power_info *pi = ci_get_pi(rdev); - bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0); - bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0); int ret = 0; u32 tmp; - if (new_vce_clock_non_zero != old_vce_clock_non_zero) { - if (new_vce_clock_non_zero) { - pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); + if (radeon_current_state->evclk != radeon_new_state->evclk) { + if (radeon_new_state->evclk) { + /* turn the clocks on when encoding */ + cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); + pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); tmp = RREG32_SMC(DPM_TABLE_475); tmp &= ~VceBootLevel_MASK; tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); @@ -3624,12 +3643,16 @@ static int ci_update_vce_dpm(struct radeon_device *rdev, ret = ci_enable_vce_dpm(rdev, true); } else { + /* turn the clocks off when not encoding */ + cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); + ret = ci_enable_vce_dpm(rdev, false); } } return ret; } +#if 0 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) { return ci_enable_samu_dpm(rdev, gate); @@ -4752,13 +4775,13 @@ int ci_dpm_set_power_state(struct radeon_device *rdev) DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); return ret; } -#if 0 + ret = ci_update_vce_dpm(rdev, new_ps, old_ps); if (ret) { DRM_ERROR("ci_update_vce_dpm failed\n"); return ret; } -#endif + ret = ci_update_sclk_t(rdev); if (ret) { DRM_ERROR("ci_update_sclk_t failed\n"); @@ -4959,9 +4982,6 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -4998,6 +5018,21 @@ static int ci_parse_power_table(struct radeon_device *rdev) power_state_offset += 2 + power_state->v2.ucNumDPMLevels; } 
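[Annotation] The vce-states loop added just below reassembles each pplib clock from a 16-bit low word plus an 8-bit high byte. A minimal standalone sketch of that unpacking (the values are hypothetical; pplib clocks are in 10 kHz units):

```c
#include <stdint.h>
#include <stdio.h>

/* Rebuild a 24-bit pplib clock from its split encoding, as the
 * vce-states loop does with usEngineClockLow/ucEngineClockHigh. */
static uint32_t unpack_pplib_clock(uint16_t low, uint8_t high)
{
	return (uint32_t)low | ((uint32_t)high << 16);
}

int main(void)
{
	/* hypothetical: 0x01:0x5f90 -> 0x15f90 = 90000, i.e. 900 MHz */
	printf("%u\n", unpack_pplib_clock(0x5f90, 0x01));
	return 0;
}
```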
rdev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { + u32 sclk, mclk; + clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); + sclk |= clock_info->ci.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); + mclk |= clock_info->ci.ucMemoryClockHigh << 16; + rdev->pm.dpm.vce_states[i].sclk = sclk; + rdev->pm.dpm.vce_states[i].mclk = mclk; + } + return 0; } @@ -5077,17 +5112,25 @@ int ci_dpm_init(struct radeon_device *rdev) ci_dpm_fini(rdev); return ret; } - ret = ci_parse_power_table(rdev); + + ret = r600_get_platform_caps(rdev); if (ret) { ci_dpm_fini(rdev); return ret; } + ret = r600_parse_extended_power_table(rdev); if (ret) { ci_dpm_fini(rdev); return ret; } + ret = ci_parse_power_table(rdev); + if (ret) { + ci_dpm_fini(rdev); + return ret; + } + pi->dll_default_on = false; pi->sram_end = SMC_RAM_END; @@ -5106,6 +5149,12 @@ int ci_dpm_init(struct radeon_device *rdev) pi->mclk_dpm_key_disabled = 0; pi->pcie_dpm_key_disabled = 0; + /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ + if ((rdev->pdev->device == 0x6658) && + (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) { + pi->mclk_dpm_key_disabled = 1; + } + pi->caps_sclk_ds = true; pi->mclk_strobe_mode_threshold = 40000; @@ -5120,6 +5169,7 @@ int ci_dpm_init(struct radeon_device *rdev) pi->caps_sclk_throttle_low_notification = false; pi->caps_uvd_dpm = true; + pi->caps_vce_dpm = true; ci_get_leakage_voltages(rdev); ci_patch_dependency_tables_with_leakage(rdev); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6419ca7cd3..c0ea66192fe 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -38,6 +38,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); MODULE_FIRMWARE("radeon/BONAIRE_ce.bin"); MODULE_FIRMWARE("radeon/BONAIRE_mec.bin"); MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); +MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin"); MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); @@ -46,6 +47,7 @@ MODULE_FIRMWARE("radeon/HAWAII_me.bin"); MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); MODULE_FIRMWARE("radeon/HAWAII_mec.bin"); MODULE_FIRMWARE("radeon/HAWAII_mc.bin"); +MODULE_FIRMWARE("radeon/HAWAII_mc2.bin"); MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); @@ -61,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin"); MODULE_FIRMWARE("radeon/KABINI_mec.bin"); MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); +MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); +MODULE_FIRMWARE("radeon/MULLINS_me.bin"); +MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); +MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); +MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); +MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); @@ -72,9 +80,11 @@ extern int sumo_rlc_init(struct radeon_device *rdev); extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern void si_rlc_reset(struct radeon_device *rdev); extern void si_init_uvd_internal_cg(struct radeon_device *rdev); +static u32 cik_get_cu_active_bitmap(struct radeon_device 
*rdev, u32 se, u32 sh); extern int cik_sdma_resume(struct radeon_device *rdev); extern void cik_sdma_enable(struct radeon_device *rdev, bool enable); extern void cik_sdma_fini(struct radeon_device *rdev); +extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable); static void cik_rlc_stop(struct radeon_device *rdev); static void cik_pcie_gen3_enable(struct radeon_device *rdev); static void cik_program_aspm(struct radeon_device *rdev); @@ -1095,7 +1105,7 @@ static const u32 spectre_golden_registers[] = 0x8a14, 0xf000003f, 0x00000007, 0x8b24, 0xffffffff, 0x00ffffff, 0x28350, 0x3f3f3fff, 0x00000082, - 0x28355, 0x0000003f, 0x00000000, + 0x28354, 0x0000003f, 0x00000000, 0x3e78, 0x00000001, 0x00000002, 0x913c, 0xffff03df, 0x00000004, 0xc768, 0x00000008, 0x00000008, @@ -1470,6 +1480,43 @@ static const u32 hawaii_mgcg_cgcg_init[] = 0xd80c, 0xff000ff0, 0x00000100 }; +static const u32 godavari_golden_registers[] = +{ + 0x55e4, 0xff607fff, 0xfc000100, + 0x6ed8, 0x00010101, 0x00010000, + 0x9830, 0xffffffff, 0x00000000, + 0x98302, 0xf00fffff, 0x00000400, + 0x6130, 0xffffffff, 0x00010000, + 0x5bb0, 0x000000f0, 0x00000070, + 0x5bc0, 0xf0311fff, 0x80300000, + 0x98f8, 0x73773777, 0x12010001, + 0x98fc, 0xffffffff, 0x00000010, + 0x8030, 0x00001f0f, 0x0000100a, + 0x2f48, 0x73773777, 0x12010001, + 0x2408, 0x000fffff, 0x000c007f, + 0x8a14, 0xf000003f, 0x00000007, + 0x8b24, 0xffffffff, 0x00ff0fff, + 0x30a04, 0x0000ff0f, 0x00000000, + 0x28a4c, 0x07ffffff, 0x06000000, + 0x4d8, 0x00000fff, 0x00000100, + 0xd014, 0x00010000, 0x00810001, + 0xd814, 0x00010000, 0x00810001, + 0x3e78, 0x00000001, 0x00000002, + 0xc768, 0x00000008, 0x00000008, + 0xc770, 0x00000f00, 0x00000800, + 0xc774, 0x00000f00, 0x00000800, + 0xc798, 0x00ffffff, 0x00ff7fbf, + 0xc79c, 0x00ffffff, 0x00ff7faf, + 0x8c00, 0x000000ff, 0x00000001, + 0x214f8, 0x01ff01ff, 0x00000002, + 0x21498, 0x007ff800, 0x00200000, + 0x2015c, 0xffffffff, 0x00000f40, + 0x88c4, 0x001f3ae3, 0x00000082, + 0x88d4, 0x0000001f, 0x00000010, + 0x30934, 0xffffffff, 0x00000000 +}; + + static void cik_init_golden_registers(struct radeon_device *rdev) { switch (rdev->family) { @@ -1501,6 +1548,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev) kalindi_golden_spm_registers, (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); break; + case CHIP_MULLINS: + radeon_program_register_sequence(rdev, + kalindi_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + radeon_program_register_sequence(rdev, + godavari_golden_registers, + (const u32)ARRAY_SIZE(godavari_golden_registers)); + radeon_program_register_sequence(rdev, + kalindi_golden_common_registers, + (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); + radeon_program_register_sequence(rdev, + kalindi_golden_spm_registers, + (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); + break; case CHIP_KAVERI: radeon_program_register_sequence(rdev, spectre_mgcg_cgcg_init, @@ -1702,20 +1763,20 @@ int ci_mc_load_microcode(struct radeon_device *rdev) const __be32 *fw_data; u32 running, blackout = 0; u32 *io_mc_regs; - int i, ucode_size, regs_size; + int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; + ucode_size = rdev->mc_fw->size / 4; + switch (rdev->family) { case CHIP_BONAIRE: io_mc_regs = (u32 *)&bonaire_io_mc_regs; - ucode_size = CIK_MC_UCODE_SIZE; regs_size = BONAIRE_IO_MC_REGS_SIZE; break; case CHIP_HAWAII: io_mc_regs = (u32 *)&hawaii_io_mc_regs; - ucode_size = HAWAII_MC_UCODE_SIZE; regs_size = HAWAII_IO_MC_REGS_SIZE; break; default: @@ -1782,7 +1843,7 @@ static int 
cik_init_microcode(struct radeon_device *rdev) const char *chip_name; size_t pfp_req_size, me_req_size, ce_req_size, mec_req_size, rlc_req_size, mc_req_size = 0, - sdma_req_size, smc_req_size = 0; + sdma_req_size, smc_req_size = 0, mc2_req_size = 0; char fw_name[30]; int err; @@ -1796,7 +1857,8 @@ static int cik_init_microcode(struct radeon_device *rdev) ce_req_size = CIK_CE_UCODE_SIZE * 4; mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; - mc_req_size = CIK_MC_UCODE_SIZE * 4; + mc_req_size = BONAIRE_MC_UCODE_SIZE * 4; + mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); break; @@ -1808,6 +1870,7 @@ static int cik_init_microcode(struct radeon_device *rdev) mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; mc_req_size = HAWAII_MC_UCODE_SIZE * 4; + mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); break; @@ -1829,6 +1892,15 @@ static int cik_init_microcode(struct radeon_device *rdev) rlc_req_size = KB_RLC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; break; + case CHIP_MULLINS: + chip_name = "MULLINS"; + pfp_req_size = CIK_PFP_UCODE_SIZE * 4; + me_req_size = CIK_ME_UCODE_SIZE * 4; + ce_req_size = CIK_CE_UCODE_SIZE * 4; + mec_req_size = CIK_MEC_UCODE_SIZE * 4; + rlc_req_size = ML_RLC_UCODE_SIZE * 4; + sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + break; default: BUG(); } @@ -1903,16 +1975,22 @@ static int cik_init_microcode(struct radeon_device *rdev) /* No SMC, MC ucode on APUs */ if (!(rdev->flags & RADEON_IS_IGP)) { - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->mc_fw->size != mc_req_size) { + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)){ printk(KERN_ERR "cik_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); err = -EINVAL; } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); @@ -2028,6 +2106,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 5: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); break; case 6: @@ -2048,6 +2127,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 9: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); break; case 10: @@ -2070,6 +2150,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 13: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); break; case 14: @@ -2092,6 +2173,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 27: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); break; 
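Side note on the MC firmware handling in cik_init_microcode above: the driver now requests the newer mc2.bin image first, silently falls back to mc.bin, and then accepts either expected length. A userspace-flavoured sketch of that request-then-fallback pattern follows (file names and sizes are illustrative, and fopen stands in for request_firmware so the snippet runs outside the kernel):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative lengths in bytes; the real driver compares against
 * BONAIRE_MC_UCODE_SIZE * 4 and BONAIRE_MC2_UCODE_SIZE * 4. */
#define MC_SIZE  (7866 * 4)
#define MC2_SIZE (7948 * 4)

static long file_size(FILE *f)
{
	long sz;
	fseek(f, 0, SEEK_END);
	sz = ftell(f);
	fseek(f, 0, SEEK_SET);
	return sz;
}

int main(void)
{
	/* prefer the newer image, fall back to the older one */
	const char *name = "BONAIRE_mc2.bin";
	FILE *fw = fopen(name, "rb");
	long sz;

	if (!fw) {
		name = "BONAIRE_mc.bin";
		fw = fopen(name, "rb");
	}
	if (!fw) {
		fprintf(stderr, "no MC firmware found\n");
		return EXIT_FAILURE;
	}
	/* either generation's length is acceptable */
	sz = file_size(fw);
	if (sz != MC_SIZE && sz != MC2_SIZE) {
		fprintf(stderr, "%s: bogus length %ld\n", name, sz);
		fclose(fw);
		return EXIT_FAILURE;
	}
	printf("%s: %ld bytes\n", name, sz);
	fclose(fw);
	return 0;
}
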
case 28: @@ -2209,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 8) { @@ -2246,6 +2329,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 5: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); break; case 6: @@ -2266,6 +2350,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 9: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); break; case 10: @@ -2288,6 +2373,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 13: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); break; case 14: @@ -2310,6 +2396,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 27: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); break; case 28: @@ -2466,6 +2553,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 5: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); break; case 6: @@ -2486,6 +2574,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 9: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); break; case 10: @@ -2508,6 +2597,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 13: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); break; case 14: @@ -2530,6 +2620,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 27: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_16x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); break; case 28: @@ -2592,6 +2683,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 5: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); break; case 6: @@ -2612,6 +2704,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 9: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); break; case 10: @@ -2634,6 +2727,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 13: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); break; case 14: @@ -2656,6 +2750,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 27: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); break; case 28: @@ -2812,6 +2907,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 5: 
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); break; case 6: @@ -2827,11 +2923,13 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) TILE_SPLIT(split_equal_to_row_size)); break; case 8: - gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED); + gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + PIPE_CONFIG(ADDR_SURF_P2); break; case 9: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); + MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P2)); break; case 10: gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | @@ -2853,6 +2951,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 13: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); break; case 14: @@ -2875,7 +2974,8 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) break; case 27: gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); + MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P2)); break; case 28: gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | @@ -3046,7 +3146,7 @@ static u32 cik_create_bitmask(u32 bit_width) } /** - * cik_select_se_sh - select which SE, SH to address + * cik_get_rb_disabled - computes the mask of disabled RBs * * @rdev: radeon_device pointer * @max_rb_num: max RBs (render backends) for the asic @@ -3159,7 +3259,7 @@ static void cik_gpu_init(struct radeon_device *rdev) u32 mc_shared_chmap, mc_arb_ramcfg; u32 hdp_host_path_cntl; u32 tmp; - int i, j; + int i, j, k; switch (rdev->family) { case CHIP_BONAIRE: @@ -3240,6 +3340,7 @@ static void cik_gpu_init(struct radeon_device *rdev) gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_KABINI: + case CHIP_MULLINS: default: rdev->config.cik.max_shader_engines = 1; rdev->config.cik.max_tile_pipes = 2; @@ -3347,6 +3448,15 @@ static void cik_gpu_init(struct radeon_device *rdev) rdev->config.cik.max_sh_per_se, rdev->config.cik.max_backends_per_se); + for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { + for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { + for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { + rdev->config.cik.active_cus += + hweight32(cik_get_cu_active_bitmap(rdev, i, j)); + } + } + } + /* set HW defaults for 3D engine */ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); @@ -3599,7 +3709,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, unsigned sel = emit_wait ? 
PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); return true; @@ -3670,6 +3780,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -3718,7 +3829,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, WRITE_DATA_DST_SEL(1)); radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); radeon_ring_write(ring, next_rptr); } @@ -4030,8 +4141,6 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev) WREG32(CP_RB0_BASE, rb_addr); WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr)); - ring->rptr = RREG32(CP_RB0_RPTR); - /* start the ring */ cik_cp_gfx_start(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; @@ -4134,8 +4243,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) { if (enable) WREG32(CP_MEC_CNTL, 0); - else + else { WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); + rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; + } udelay(50); } @@ -4586,8 +4698,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) rdev->ring[idx].wptr = 0; mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr; WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr); - rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR); - mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr; + mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR); /* set the vmid for the queue */ mqd->queue_state.cp_hqd_vmid = 0; @@ -5117,11 +5228,9 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -5298,6 +5407,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | ENABLE_L1_TLB | + ENABLE_L1_FRAGMENT_PROCESSING | SYSTEM_ACCESS_MODE_NOT_IN_SYS | ENABLE_ADVANCED_DRIVER_MODEL | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); @@ -5310,7 +5420,8 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | - L2_CACHE_BIGK_FRAGMENT_SIZE(6)); + BANK_SELECT(4) | + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); @@ -5346,6 +5457,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | 
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | @@ -5770,6 +5882,9 @@ static int cik_rlc_resume(struct radeon_device *rdev) case CHIP_KABINI: size = KB_RLC_UCODE_SIZE; break; + case CHIP_MULLINS: + size = ML_RLC_UCODE_SIZE; + break; } cik_rlc_stop(rdev); @@ -6141,6 +6256,10 @@ void cik_update_cg(struct radeon_device *rdev, cik_enable_hdp_mgcg(rdev, enable); cik_enable_hdp_ls(rdev, enable); } + + if (block & RADEON_CG_BLOCK_VCE) { + vce_v2_0_enable_mgcg(rdev, enable); + } } static void cik_init_cg(struct radeon_device *rdev) @@ -6514,12 +6633,13 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_KABINI: + case CHIP_MULLINS: buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_HAWAII: - buffer[count++] = 0x3a00161a; - buffer[count++] = 0x0000002e; + buffer[count++] = cpu_to_le32(0x3a00161a); + buffer[count++] = cpu_to_le32(0x0000002e); break; default: buffer[count++] = cpu_to_le32(0x00000000); @@ -6659,6 +6779,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } + /* pflip */ + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } /* dac hotplug */ WREG32(DAC_AUTODETECT_INT_CONTROL, 0); @@ -7015,6 +7148,25 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -7051,6 +7203,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev) rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); + rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC1_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS 
+ + EVERGREEN_CRTC5_REGISTER_OFFSET); + } + + if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) @@ -7061,6 +7236,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); if (rdev->num_crtc >= 4) { + if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) @@ -7072,6 +7253,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) } if (rdev->num_crtc >= 6) { + if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) @@ -7190,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -7277,7 +7465,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -7303,7 +7491,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } @@ -7329,7 +7517,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_flip(rdev, 2); + radeon_crtc_handle_vblank(rdev, 2); rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } @@ -7355,7 +7543,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_flip(rdev, 3); + radeon_crtc_handle_vblank(rdev, 3); rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } @@ -7381,7 +7569,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_flip(rdev, 4); + radeon_crtc_handle_vblank(rdev, 4); rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } @@ -7407,7 +7595,7 @@ restart_ih: 
wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_flip(rdev, 5); + radeon_crtc_handle_vblank(rdev, 5); rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } @@ -7423,6 +7611,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -7481,14 +7678,30 @@ restart_ih: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); cik_vm_decode_fault(rdev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + break; + case 167: /* VCE */ + DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data); + switch (src_data) { + case 0: + radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX); + break; + case 1: + radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); + break; + } break; case 176: /* GFX RB CP_INT */ case 177: /* GFX IB CP_INT */ @@ -7789,6 +8002,22 @@ static int cik_startup(struct radeon_device *rdev) if (r) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; + r = radeon_vce_resume(rdev); + if (!r) { + r = vce_v2_0_resume(rdev); + if (!r) + r = radeon_fence_driver_start_ring(rdev, + TN_RING_TYPE_VCE1_INDEX); + if (!r) + r = radeon_fence_driver_start_ring(rdev, + TN_RING_TYPE_VCE2_INDEX); + } + if (r) { + dev_err(rdev->dev, "VCE init error (%d).\n", r); + rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; + rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; + } + /* Enable IRQ */ if (!rdev->irq.installed) { r = radeon_irq_kms_init(rdev); @@ -7864,6 +8093,23 @@ static int cik_startup(struct radeon_device *rdev) DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); } + r = -ENOENT; + + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + VCE_CMD_NO_OP); + + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + if (ring->ring_size) + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, + VCE_CMD_NO_OP); + + if (!r) + r = vce_v1_0_init(rdev); + else if (r != -ENOENT) + DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -7902,7 +8148,8 @@ int cik_resume(struct radeon_device *rdev) /* init golden registers */ cik_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cik_startup(rdev); @@ -7934,6 +8181,7 @@ int cik_suspend(struct radeon_device *rdev) cik_sdma_enable(rdev, false); uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + radeon_vce_suspend(rdev); cik_fini_pg(rdev); cik_fini_cg(rdev); cik_irq_suspend(rdev); @@ -8066,6 +8314,17 @@ int cik_init(struct radeon_device 
*rdev) r600_ring_init(rdev, ring, 4096); } + r = radeon_vce_init(rdev); + if (!r) { + ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + + ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + } + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -8127,6 +8386,7 @@ void cik_fini(struct radeon_device *rdev) radeon_irq_kms_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + radeon_vce_fini(rdev); cik_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); @@ -8865,6 +9125,41 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) return r; } +int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk) +{ + int r, i; + struct atom_clock_dividers dividers; + u32 tmp; + + r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + ecclk, false, &dividers); + if (r) + return r; + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; + + tmp = RREG32_SMC(CG_ECLK_CNTL); + tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK); + tmp |= dividers.post_divider; + WREG32_SMC(CG_ECLK_CNTL, tmp); + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; + + return 0; +} + static void cik_pcie_gen3_enable(struct radeon_device *rdev) { struct pci_dev *root = rdev->pdev->bus->self; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 1ecb3f1070e..8e9d0f1d858 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -141,7 +141,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, next_rptr += 4; radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); radeon_ring_write(ring, 1); /* number of DWs to follow */ radeon_ring_write(ring, next_rptr); } @@ -151,7 +151,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0)); radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ - radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); radeon_ring_write(ring, ib->length_dw); } @@ -203,8 +203,8 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev, /* write the fence */ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0)); - radeon_ring_write(ring, addr & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); + radeon_ring_write(ring, upper_32_bits(addr)); radeon_ring_write(ring, fence->seq); /* generate an interrupt */ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); @@ -233,7 +233,7 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); radeon_ring_write(ring, addr & 0xfffffff8); - radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(addr)); return true; } @@ -264,6 +264,8
@@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); } + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; } /** @@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable) u32 me_cntl, reg_offset; int i; + if (enable == false) { + cik_sdma_gfx_stop(rdev); + cik_sdma_rlc_stop(rdev); + } + for (i = 0; i < 2; i++) { if (i == 0) reg_offset = SDMA0_REGISTER_OFFSET; @@ -362,8 +369,6 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev) ring->wptr = 0; WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2); - ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2; - /* enable DMA RB */ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE); @@ -420,10 +425,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev) if (!rdev->sdma_fw) return -EINVAL; - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); - /* halt the MEs */ cik_sdma_enable(rdev, false); @@ -492,9 +493,6 @@ int cik_sdma_resume(struct radeon_device *rdev) */ void cik_sdma_fini(struct radeon_device *rdev) { - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); /* halt the MEs */ cik_sdma_enable(rdev, false); radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); @@ -553,10 +551,10 @@ int cik_copy_dma(struct radeon_device *rdev, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0)); radeon_ring_write(ring, cur_size_in_bytes); radeon_ring_write(ring, 0); /* src/dst endian swap */ - radeon_ring_write(ring, src_offset & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); - radeon_ring_write(ring, dst_offset & 0xffffffff); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(src_offset)); + radeon_ring_write(ring, upper_32_bits(src_offset)); + radeon_ring_write(ring, lower_32_bits(dst_offset)); + radeon_ring_write(ring, upper_32_bits(dst_offset)); src_offset += cur_size_in_bytes; dst_offset += cur_size_in_bytes; } @@ -564,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -599,14 +598,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev, tmp = 0xCAFEDEAD; writel(tmp, ptr); - r = radeon_ring_lock(rdev, ring, 4); + r = radeon_ring_lock(rdev, ring, 5); if (r) { DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); return r; } radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); radeon_ring_write(ring, 1); /* number of DWs to follow */ radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring); @@ -661,7 +660,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff; + ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); ib.ptr[3] = 1; ib.ptr[4] = 
0xDEADBEEF; ib.length_dw = 5; @@ -713,11 +712,9 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) mask = RADEON_RESET_DMA1; if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -745,7 +742,26 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev, trace_radeon_vm_set_page(pe, addr, count, incr, flags); - if (flags & R600_PTE_SYSTEM) { + if (flags == R600_PTE_GART) { + uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; + while (count) { + unsigned bytes = count * 8; + if (bytes > 0x1FFFF8) + bytes = 0x1FFFF8; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + + pe += bytes; + src += bytes; + count -= bytes / 8; + } + } else if (flags & R600_PTE_SYSTEM) { while (count) { ndw = count * 2; if (ndw > 0xFFFFE) diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 98bae9d7b74..0c6e1b55d96 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -203,6 +203,12 @@ #define CTF_TEMP_MASK 0x0003fe00 #define CTF_TEMP_SHIFT 9 +#define CG_ECLK_CNTL 0xC05000AC +# define ECLK_DIVIDER_MASK 0x7f +# define ECLK_DIR_CNTL_EN (1 << 8) +#define CG_ECLK_STATUS 0xC05000B0 +# define ECLK_STATUS (1 << 0) + #define CG_SPLL_FUNC_CNTL 0xC0500140 #define SPLL_RESET (1 << 0) #define SPLL_PWRON (1 << 1) @@ -476,6 +482,7 @@ #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) +#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24) #define VM_CONTEXT1_CNTL 0x1414 #define VM_CONTEXT0_CNTL2 0x1430 #define VM_CONTEXT1_CNTL2 0x1434 @@ -882,6 +889,15 @@ # define DC_HPD6_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 +/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ +#define GRPH_INT_STATUS 0x6858 +# define GRPH_PFLIP_INT_OCCURRED (1 << 0) +# define GRPH_PFLIP_INT_CLEAR (1 << 8) +/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ +#define GRPH_INT_CONTROL 0x685c +# define GRPH_PFLIP_INT_MASK (1 << 0) +# define GRPH_PFLIP_INT_TYPE (1 << 8) + #define DAC_AUTODETECT_INT_CONTROL 0x67c8 #define DC_HPD1_INT_STATUS 0x601c @@ -1736,12 +1752,12 @@ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ #define EOP_TCL1_ACTION_EN (1 << 16) #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TCL2_VOLATILE (1 << 24) #define EOP_CACHE_POLICY(x) ((x) << 25) /* 0 - LRU * 1 - Stream * 2 - Bypass */ -#define EOP_TCL2_VOLATILE (1 << 27) #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data @@ -2010,4 +2026,47 @@ /* UVD CTX indirect */ #define UVD_CGC_MEM_CTRL 0xC0 +/* VCE */ + +#define VCE_VCPU_CACHE_OFFSET0 0x20024 +#define VCE_VCPU_CACHE_SIZE0 0x20028 +#define VCE_VCPU_CACHE_OFFSET1 0x2002c +#define VCE_VCPU_CACHE_SIZE1 0x20030 +#define VCE_VCPU_CACHE_OFFSET2 0x20034 +#define VCE_VCPU_CACHE_SIZE2 0x20038 +#define VCE_RB_RPTR2 0x20178 +#define VCE_RB_WPTR2 0x2017c +#define VCE_RB_RPTR 0x2018c +#define VCE_RB_WPTR 0x20190 +#define VCE_CLOCK_GATING_A 0x202f8 +# define 
CGC_CLK_GATE_DLY_TIMER_MASK (0xf << 0) +# define CGC_CLK_GATE_DLY_TIMER(x) ((x) << 0) +# define CGC_CLK_GATER_OFF_DLY_TIMER_MASK (0xff << 4) +# define CGC_CLK_GATER_OFF_DLY_TIMER(x) ((x) << 4) +# define CGC_UENC_WAIT_AWAKE (1 << 18) +#define VCE_CLOCK_GATING_B 0x202fc +#define VCE_CGTT_CLK_OVERRIDE 0x207a0 +#define VCE_UENC_CLOCK_GATING 0x207bc +# define CLOCK_ON_DELAY_MASK (0xf << 0) +# define CLOCK_ON_DELAY(x) ((x) << 0) +# define CLOCK_OFF_DELAY_MASK (0xff << 4) +# define CLOCK_OFF_DELAY(x) ((x) << 4) +#define VCE_UENC_REG_CLOCK_GATING 0x207c0 +#define VCE_SYS_INT_EN 0x21300 +# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) +#define VCE_LMI_CTRL2 0x21474 +#define VCE_LMI_CTRL 0x21498 +#define VCE_LMI_VM_CTRL 0x214a0 +#define VCE_LMI_SWAP_CNTL 0x214b4 +#define VCE_LMI_SWAP_CNTL1 0x214b8 +#define VCE_LMI_CACHE_CTRL 0x214f4 + +#define VCE_CMD_NO_OP 0x00000000 +#define VCE_CMD_END 0x00000001 +#define VCE_CMD_IB 0x00000002 +#define VCE_CMD_FENCE 0x00000003 +#define VCE_CMD_TRAP 0x00000004 +#define VCE_CMD_IB_AUTO 0x00000005 +#define VCE_CMD_SEMAPHORE 0x00000006 + #endif diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h index aa908c55a51..e48a14037b7 100644 --- a/drivers/gpu/drm/radeon/clearstate_cayman.h +++ b/drivers/gpu/drm/radeon/clearstate_cayman.h @@ -1050,7 +1050,7 @@ static const struct cs_extent_def SECT_CONTEXT_defs[] = {SECT_CONTEXT_def_5, 0x0000a29e, 5 }, {SECT_CONTEXT_def_6, 0x0000a2a5, 56 }, {SECT_CONTEXT_def_7, 0x0000a2de, 290 }, - { 0, 0, 0 } + { NULL, 0, 0 } }; static const u32 SECT_CLEAR_def_1[] = { @@ -1061,7 +1061,7 @@ static const u32 SECT_CLEAR_def_1[] = static const struct cs_extent_def SECT_CLEAR_defs[] = { {SECT_CLEAR_def_1, 0x0000ffc0, 3 }, - { 0, 0, 0 } + { NULL, 0, 0 } }; static const u32 SECT_CTRLCONST_def_1[] = { @@ -1071,11 +1071,11 @@ static const u32 SECT_CTRLCONST_def_1[] = static const struct cs_extent_def SECT_CTRLCONST_defs[] = { {SECT_CTRLCONST_def_1, 0x0000f3fc, 2 }, - { 0, 0, 0 } + { NULL, 0, 0 } }; static const struct cs_section_def cayman_cs_data[] = { { SECT_CONTEXT_defs, SECT_CONTEXT }, { SECT_CLEAR_defs, SECT_CLEAR }, { SECT_CTRLCONST_defs, SECT_CTRLCONST }, - { 0, SECT_NONE } + { NULL, SECT_NONE } }; diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h index c3982f9475f..f55d06664e3 100644 --- a/drivers/gpu/drm/radeon/clearstate_ci.h +++ b/drivers/gpu/drm/radeon/clearstate_ci.h @@ -936,9 +936,9 @@ static const struct cs_extent_def ci_SECT_CONTEXT_defs[] = {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 }, {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, - { 0, 0, 0 } + { NULL, 0, 0 } }; static const struct cs_section_def ci_cs_data[] = { { ci_SECT_CONTEXT_defs, SECT_CONTEXT }, - { 0, SECT_NONE } + { NULL, SECT_NONE } }; diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h index b994cb2a35a..66e39cdb5cb 100644 --- a/drivers/gpu/drm/radeon/clearstate_si.h +++ b/drivers/gpu/drm/radeon/clearstate_si.h @@ -933,9 +933,9 @@ static const struct cs_extent_def si_SECT_CONTEXT_defs[] = {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 }, {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 }, {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 }, - { 0, 0, 0 } + { NULL, 0, 0 } }; static const struct cs_section_def si_cs_data[] = { { si_SECT_CONTEXT_defs, SECT_CONTEXT }, - { 0, SECT_NONE } + { NULL, SECT_NONE } }; diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index cf783fc0ef2..47d31e91575 100644 --- 
a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } return 0; @@ -2036,6 +2036,10 @@ int cypress_dpm_init(struct radeon_device *rdev) pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = rv7xx_parse_power_table(rdev); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c new file mode 100644 index 00000000000..51800e340a5 --- /dev/null +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -0,0 +1,244 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * Copyright 2014 Rafał Miłecki + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <linux/hdmi.h> +#include <drm/drmP.h> +#include "radeon.h" +#include "radeon_asic.h" +#include "r600d.h" + +static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; + u32 tmp; + u8 *sadb; + int sad_count; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + radeon_connector = to_radeon_connector(connector); + break; + } + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + if (sad_count < 0) { + DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + return; + } + + /* program the speaker allocation */ + tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); + tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); + /* set HDMI mode */ + tmp |= HDMI_CONNECTION; + if (sad_count) + tmp |= SPEAKER_ALLOCATION(sadb[0]); + else + tmp |= SPEAKER_ALLOCATION(5); /* stereo */ + WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); + + kfree(sadb); +} + +static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) +{ + struct radeon_device *rdev = encoder->dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector = NULL; + struct cea_sad *sads; + int i, sad_count; + + static const u16 eld_reg_to_type[][2] = { + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, + { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, + }; + + list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + radeon_connector = to_radeon_connector(connector); + break; + } + } + + if (!radeon_connector) { + DRM_ERROR("Couldn't find encoder's connector\n"); + return; + } + + sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); + if (sad_count < 0) { + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + return; + } + BUG_ON(!sads); + + for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { + u32 value = 0; + u8 stereo_freqs = 0; + int max_channels = -1; + int j; + + for (j = 0; j < sad_count; j++) { + struct cea_sad *sad = &sads[j]; + + if (sad->format == eld_reg_to_type[i][1]) { + if (sad->channels > max_channels) { + value = MAX_CHANNELS(sad->channels) | + DESCRIPTOR_BYTE_2(sad->byte2) | + SUPPORTED_FREQUENCIES(sad->freq); + max_channels = sad->channels; + } + + if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) + stereo_freqs |= sad->freq; + else + break; + } + } + + value |= 
SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); + + WREG32(eld_reg_to_type[i][0], value); + } + + kfree(sads); +} + +/* + * update the info frames with the data from the current display mode + */ +void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + struct hdmi_avi_infoframe frame; + uint32_t offset; + ssize_t err; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (!dig->afmt->enabled) + return; + offset = dig->afmt->offset; + + /* disable audio prior to setting up hw */ + dig->afmt->pin = r600_audio_get_pin(rdev); + r600_audio_enable(rdev, dig->afmt->pin, false); + + r600_audio_set_dto(encoder, mode->clock); + + WREG32(HDMI0_VBI_PACKET_CONTROL + offset, + HDMI0_NULL_SEND); /* send null packets when required */ + + WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000); + + if (ASIC_IS_DCE32(rdev)) { + WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, + HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ + HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ + WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, + AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */ + AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ + } else { + WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, + HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ + HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ + HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ + HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ + } + + if (ASIC_IS_DCE32(rdev)) { + dce3_2_afmt_write_speaker_allocation(encoder); + dce3_2_afmt_write_sad_regs(encoder); + } + + WREG32(HDMI0_ACR_PACKET_CONTROL + offset, + HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ + HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ + + WREG32(HDMI0_VBI_PACKET_CONTROL + offset, + HDMI0_NULL_SEND | /* send null packets when required */ + HDMI0_GC_SEND | /* send general control packets */ + HDMI0_GC_CONT); /* send general control packets every frame */ + + /* TODO: HDMI0_AUDIO_INFO_UPDATE */ + WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, + HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ + HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ + HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */ + + WREG32(HDMI0_INFOFRAME_CONTROL1 + offset, + HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ + HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */ + + WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */ + + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + if (err < 0) { + DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); + return; + } + + err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); + return; + } + + r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); + r600_hdmi_update_ACR(encoder, mode->clock); + + /* it's unknown what these bits
do exactly, but it's indeed quite useful for debugging */ + WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF); + WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF); + WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); + WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); + + r600_hdmi_audio_workaround(encoder); + + /* enable audio after setting up hw */ + r600_audio_enable(rdev, dig->afmt->pin, true); +} diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 713a5d35990..0a65dc7e93e 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev) return !ASIC_IS_NODCE(rdev); } -static void dce6_audio_enable(struct radeon_device *rdev, - struct r600_audio_pin *pin, - bool enable) +void dce6_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable) { + if (!pin) + return; + WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, - AUDIO_ENABLED); - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); + enable ? AUDIO_ENABLED : 0); } static const u32 pin_offsets[7] = @@ -307,11 +309,17 @@ int dce6_audio_init(struct radeon_device *rdev) rdev->audio.enabled = true; - if (ASIC_IS_DCE8(rdev)) + if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */ + rdev->audio.num_pins = 7; + else if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */ + rdev->audio.num_pins = 3; + else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */ + rdev->audio.num_pins = 7; + else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */ rdev->audio.num_pins = 6; - else if (ASIC_IS_DCE61(rdev)) - rdev->audio.num_pins = 4; - else + else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */ + rdev->audio.num_pins = 2; + else /* SI: 6 streams, 6 endpoints */ rdev->audio.num_pins = 6; for (i = 0; i < rdev->audio.num_pins; i++) { @@ -323,7 +331,8 @@ int dce6_audio_init(struct radeon_device *rdev) rdev->audio.pin[i].connected = false; rdev->audio.pin[i].offset = pin_offsets[i]; rdev->audio.pin[i].id = i; - dce6_audio_enable(rdev, &rdev->audio.pin[i], true); + /* disable audio.
it will be set up later */ + dce6_audio_enable(rdev, &rdev->audio.pin[i], false); } return 0; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f2b9e21ce4d..15e4f28015e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002, 0x913c, 0x0000000f, 0x0000000a }; @@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002 }; @@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] = static const u32 supersumo_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] = static const u32 wrestler_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -1301,36 +1301,6 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) } /** - * radeon_irq_kms_pflip_irq_get - pre-pageflip callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to prepare for pageflip on - * - * Pre-pageflip callback (evergreen+). - * Enables the pageflip irq (vblank irq). - */ -void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) -{ - /* enable the pflip int */ - radeon_irq_kms_pflip_irq_get(rdev, crtc); -} - -/** - * evergreen_post_page_flip - pos-pageflip callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to cleanup pageflip on - * - * Post-pageflip callback (evergreen+). - * Disables the pageflip irq (vblank irq). - */ -void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) -{ - /* disable the pflip int */ - radeon_irq_kms_pflip_irq_put(rdev, crtc); -} - -/** * evergreen_page_flip - pageflip callback. * * @rdev: radeon_device pointer @@ -1343,7 +1313,7 @@ void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) * double buffered update to take place. * Returns the current update pending status. */ -u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); @@ -1375,9 +1345,23 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) /* Unlock the lock, so double-buffering can take place inside vblank */ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); +} + +/** + * evergreen_page_flip_pending - check if page flip is still pending + * + * @rdev: radeon_device pointer + * @crtc_id: crtc to check + * + * Returns the current update pending status. 
+ */ +bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; /* Return current update_pending status: */ - return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; + return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & + EVERGREEN_GRPH_SURFACE_UPDATE_PENDING); } /* get temperature in millidegrees */ @@ -1680,7 +1664,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) case RADEON_HPD_6: if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) connected = true; - break; + break; default: break; } @@ -2658,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); @@ -2990,8 +2975,6 @@ static int evergreen_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_BASE, ring->gpu_addr >> 8); WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); - ring->rptr = RREG32(CP_RB_RPTR); - evergreen_cp_start(rdev); ring->ready = true; r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); @@ -3355,6 +3338,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev) disabled_rb_mask &= ~(1 << i); } + for (i = 0; i < rdev->config.evergreen.num_ses; i++) { + u32 simd_disable_bitmap; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16; + simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds; + tmp <<= 16; + tmp |= simd_disable_bitmap; + } + rdev->config.evergreen.active_simds = hweight32(~tmp); + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); @@ -3952,11 +3947,9 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -4375,7 +4368,6 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; u32 dma_cntl, dma_cntl1 = 0; u32 thermal_int = 0; @@ -4558,15 +4550,21 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + 
EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -4758,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -4809,7 +4808,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -4835,7 +4834,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } @@ -4861,7 +4860,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_flip(rdev, 2); + radeon_crtc_handle_vblank(rdev, 2); rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } @@ -4887,7 +4886,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_flip(rdev, 3); + radeon_crtc_handle_vblank(rdev, 3); rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } @@ -4913,7 +4912,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_flip(rdev, 4); + radeon_crtc_handle_vblank(rdev, 4); rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } @@ -4939,7 +4938,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_flip(rdev, 5); + radeon_crtc_handle_vblank(rdev, 5); rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } @@ -4955,6 +4954,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -5060,14 +5068,16 @@ restart_ih: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); cayman_vm_decode_fault(rdev, status, addr); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 176: /* CP_INT 
in ring buffer */ case 177: /* CP_INT in IB1 */ @@ -5299,7 +5309,8 @@ int evergreen_resume(struct radeon_device *rdev) /* init golden registers */ evergreen_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = evergreen_startup(rdev); @@ -5475,9 +5486,9 @@ void evergreen_fini(struct radeon_device *rdev) radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - evergreen_pcie_gart_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + evergreen_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index c7cac07f139..5c8b358f9fb 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -1165,7 +1165,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case DB_DEPTH_CONTROL: track->db_depth_control = radeon_get_ib_value(p, idx); @@ -1196,12 +1196,12 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } ib[idx] &= ~Z_ARRAY_MODE(0xf); track->db_z_info &= ~Z_ARRAY_MODE(0xf); - ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); - track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); + track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); + if (reloc->tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; - evergreen_tiling_fields(reloc->lobj.tiling_flags, + evergreen_tiling_fields(reloc->tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); @@ -1237,7 +1237,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->db_z_read_offset = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->db_z_read_bo = reloc->robj; track->db_dirty = true; break; @@ -1249,7 +1249,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->db_z_write_offset = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->db_z_write_bo = reloc->robj; track->db_dirty = true; break; @@ -1261,7 +1261,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->db_s_read_offset = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->db_s_read_bo = reloc->robj; track->db_dirty = true; break; @@ -1273,7 +1273,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->db_s_write_offset = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->db_s_write_bo = reloc->robj; 
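/* Editorial aside: every conversion in this evergreen_cs.c hunk follows the
 * same idiom. The old reloc->lobj.gpu_offset indirection becomes
 * reloc->gpu_offset, and the indirect-buffer dword is patched by adding the
 * relocated base address in 256-byte units, since these surface base
 * registers take addr >> 8. A minimal sketch of that pattern;
 * cs_patch_base256() is a hypothetical helper name, not something this
 * patch adds: */
static inline void cs_patch_base256(u32 *ib, unsigned int idx, u64 gpu_offset)
{
	/* base registers hold 256-byte aligned GPU addresses */
	ib[idx] += (u32)((gpu_offset >> 8) & 0xffffffff);
}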
track->db_dirty = true; break; @@ -1297,7 +1297,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->vgt_strmout_bo[tmp] = reloc->robj; track->streamout_dirty = true; break; @@ -1317,7 +1317,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); case CB_TARGET_MASK: track->cb_target_mask = radeon_get_ib_value(p, idx); track->cb_dirty = true; @@ -1381,8 +1381,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); } track->cb_dirty = true; break; @@ -1399,8 +1399,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); } track->cb_dirty = true; break; @@ -1461,10 +1461,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + if (reloc->tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; - evergreen_tiling_fields(reloc->lobj.tiling_flags, + evergreen_tiling_fields(reloc->tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); @@ -1489,10 +1489,10 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + if (reloc->tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; - evergreen_tiling_fields(reloc->lobj.tiling_flags, + evergreen_tiling_fields(reloc->tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); @@ -1520,7 +1520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_fmask_bo[tmp] = reloc->robj; break; case CB_COLOR0_CMASK: @@ -1537,7 +1537,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += 
(u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_cmask_bo[tmp] = reloc->robj; break; case CB_COLOR0_FMASK_SLICE: @@ -1578,7 +1578,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } tmp = (reg - CB_COLOR0_BASE) / 0x3c; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_bo[tmp] = reloc->robj; track->cb_dirty = true; break; @@ -1594,7 +1594,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_bo[tmp] = reloc->robj; track->cb_dirty = true; break; @@ -1606,7 +1606,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx); - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->htile_bo = reloc->robj; track->db_dirty = true; break; @@ -1723,7 +1723,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SX_MEMORY_EXPORT_BASE: if (p->rdev->family >= CHIP_CAYMAN) { @@ -1737,7 +1737,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case CAYMAN_SX_SCATTER_EXPORT_BASE: if (p->rdev->family < CHIP_CAYMAN) { @@ -1751,7 +1751,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SX_MISC: track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; @@ -1836,7 +1836,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (idx_value & 0xfffffff0) + ((u64)(tmp & 0xff) << 32); @@ -1882,7 +1882,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); @@ -1909,7 +1909,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); @@ -1937,7 +1937,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + radeon_get_ib_value(p, idx+1) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -2027,7 +2027,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad DISPATCH_INDIRECT\n"); return -EINVAL; } - ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); + ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff); r = 
evergreen_cs_track_check(p); if (r) { dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); @@ -2049,7 +2049,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, tmp = radeon_get_ib_value(p, idx) + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); - offset = reloc->lobj.gpu_offset + tmp; + offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", @@ -2144,7 +2144,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, tmp = radeon_get_ib_value(p, idx+2) + ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); - offset = reloc->lobj.gpu_offset + tmp; + offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", @@ -2174,7 +2174,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } - ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_EVENT_WRITE: @@ -2190,7 +2190,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -2212,7 +2212,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -2234,7 +2234,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -2302,11 +2302,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { ib[idx+1+(i*8)+1] |= - TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); + if (reloc->tiling_flags & RADEON_TILING_MACRO) { unsigned bankw, bankh, mtaspect, tile_split; - evergreen_tiling_fields(reloc->lobj.tiling_flags, + evergreen_tiling_fields(reloc->tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split); @@ -2318,7 +2318,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } } texture = reloc->robj; - toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); /* tex mip base */ tex_dim = ib[idx+1+(i*8)+0] & 0x7; @@ -2337,7 +2337,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad SET_RESOURCE (tex)\n"); return -EINVAL; } - moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); mipmap = reloc->robj; } @@ -2364,7 +2364,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, 
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; } - offset64 = reloc->lobj.gpu_offset + offset; + offset64 = reloc->gpu_offset + offset; ib[idx+1+(i*8)+0] = offset64; ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | (upper_32_bits(offset64) & 0xff); @@ -2445,7 +2445,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } @@ -2464,7 +2464,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } @@ -2493,7 +2493,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; break; @@ -2518,7 +2518,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { @@ -2542,7 +2542,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } else { @@ -2717,7 +2717,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); p->idx += count + 7; break; /* linear */ @@ -2725,8 +2725,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; p->idx += count + 3; break; default: @@ -2768,10 +2768,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; - ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; + ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 5; break; /* Copy L2T/T2L */ @@ -2781,22 +2781,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; - ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx + 7); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; - ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 
0xfffffffc); - ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+7); src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; - ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", @@ -2827,10 +2827,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset + count, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); - ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); - ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; - ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff); + ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff); + ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; + ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 5; break; /* Copy L2L, partial */ @@ -2840,10 +2840,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("L2L Partial is cayman only !\n"); return -EINVAL; } - ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff); - ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; - ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff); - ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff); + ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff; + ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff); + ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; p->idx += 9; break; @@ -2876,12 +2876,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; - ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff; - ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc); + ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; + ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff; + ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 7; break; /* Copy L2T Frame to Field */ @@ -2916,10 +2916,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); - ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); - ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+9] += 
upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); + ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8); + ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 10; break; /* Copy L2T/T2L, partial */ @@ -2932,16 +2932,16 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* detile bit */ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { /* tiled src, linear dst */ - ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); - ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; } else { /* linear src, tiled dst */ - ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } p->idx += 12; break; @@ -2978,10 +2978,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); - ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); - ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); + ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8); + ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 10; break; /* Copy L2T/T2L (tile units) */ @@ -2992,22 +2992,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; - ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx+7); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; - ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+7); src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; - ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", @@ -3028,8 +3028,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("L2T, T2L Partial is cayman only !\n"); return -EINVAL; } - ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); - ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(src_reloc->gpu_offset >> 
8); + ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8); p->idx += 13; break; /* Copy L2T broadcast (tile units) */ @@ -3065,10 +3065,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); - ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8); - ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); + ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8); + ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff; p->idx += 10; break; default: @@ -3089,8 +3089,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000; p->idx += 4; break; case DMA_PACKET_NOP: diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index a37b5443638..478caefe0fe 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -174,11 +175,9 @@ bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin u32 reset_mask = evergreen_gpu_check_soft_reset(rdev); if (!(reset_mask & RADEON_RESET_DMA)) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 0c6d5cef4cf..1ec0e6e83f9 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -293,10 +293,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; struct hdmi_avi_infoframe frame; uint32_t offset; ssize_t err; + uint32_t val; + int bpc = 8; if (!dig || !dig->afmt) return; @@ -306,6 +309,21 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode return; offset = dig->afmt->offset; + /* hdmi deep color mode general control packets setup, if bpc > 8 */ + if (encoder->crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + bpc = radeon_crtc->bpc; + } + + /* disable audio prior to setting up hw */ + if (ASIC_IS_DCE6(rdev)) { + dig->afmt->pin = dce6_audio_get_pin(rdev); + dce6_audio_enable(rdev, dig->afmt->pin, false); + } else { + dig->afmt->pin = r600_audio_get_pin(rdev); + r600_audio_enable(rdev, dig->afmt->pin, false); + } + evergreen_audio_set_dto(encoder, mode->clock); WREG32(HDMI_VBI_PACKET_CONTROL + offset, @@ -313,6 
+331,35 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000); + val = RREG32(HDMI_CONTROL + offset); + val &= ~HDMI_DEEP_COLOR_ENABLE; + val &= ~HDMI_DEEP_COLOR_DEPTH_MASK; + + switch (bpc) { + case 0: + case 6: + case 8: + case 16: + default: + DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", + connector->name, bpc); + break; + case 10: + val |= HDMI_DEEP_COLOR_ENABLE; + val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR); + DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", + connector->name); + break; + case 12: + val |= HDMI_DEEP_COLOR_ENABLE; + val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR); + DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", + connector->name); + break; + } + + WREG32(HDMI_CONTROL + offset, val); + WREG32(HDMI_VBI_PACKET_CONTROL + offset, HDMI_NULL_SEND | /* send null packets when required */ HDMI_GC_SEND | /* send general control packets */ @@ -339,9 +386,13 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ - WREG32(HDMI_ACR_PACKET_CONTROL + offset, - HDMI_ACR_SOURCE | /* select SW CTS value */ - HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ + if (bpc > 8) + WREG32(HDMI_ACR_PACKET_CONTROL + offset, + HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ + else + WREG32(HDMI_ACR_PACKET_CONTROL + offset, + HDMI_ACR_SOURCE | /* select SW CTS value */ + HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ evergreen_hdmi_update_ACR(encoder, mode->clock); @@ -409,12 +460,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001); WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001); + + /* enable audio after setting up hw */ + if (ASIC_IS_DCE6(rdev)) + dce6_audio_enable(rdev, dig->afmt->pin, true); + else + r600_audio_enable(rdev, dig->afmt->pin, true); } void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) { - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -427,15 +482,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; - if (enable) { - if (ASIC_IS_DCE6(rdev)) - dig->afmt->pin = dce6_audio_get_pin(rdev); - else - dig->afmt->pin = r600_audio_get_pin(rdev); - } else { - dig->afmt->pin = NULL; - } - dig->afmt->enabled = enable; DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index a0f63ff5a5e..23bff590fb6 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -116,6 +116,8 @@ # define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 # define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2 # define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4 +#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x6808 +# define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8) #define EVERGREEN_GRPH_SWAP_CONTROL 0x680c # define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) # define EVERGREEN_GRPH_ENDIAN_NONE 0 @@ -237,7 +239,6 @@ # define EVERGREEN_CRTC_V_BLANK (1 << 0) #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 #define
EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 -#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 #define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h index 76ada8cfe90..3a03ba37d04 100644 --- a/drivers/gpu/drm/radeon/evergreen_smc.h +++ b/drivers/gpu/drm/radeon/evergreen_smc.h @@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 -#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 +#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index f9c7963b3ee..b066d6711b8 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -517,10 +517,11 @@ # define HDMI_ERROR_ACK (1 << 8) # define HDMI_ERROR_MASK (1 << 9) # define HDMI_DEEP_COLOR_ENABLE (1 << 24) -# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28) +# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28) # define HDMI_24BIT_DEEP_COLOR 0 # define HDMI_30BIT_DEEP_COLOR 1 # define HDMI_36BIT_DEEP_COLOR 2 +# define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28) #define HDMI_STATUS 0x7034 # define HDMI_ACTIVE_AVMUTE (1 << 0) # define HDMI_AUDIO_PACKET_ERROR (1 << 16) diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b6e01d5d2cc..9ef8c38f2d6 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev, return 0; } +static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_2bit) +{ + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + if (vid_2bit < vddc_sclk_table->count) + return vddc_sclk_table->entries[vid_2bit].v; + else + return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) + return vid_mapping_table->entries[i].vid_7bit; + } + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; + } +} + +static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_7bit) +{ + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + for (i = 0; i < vddc_sclk_table->count; i++) { + if (vddc_sclk_table->entries[i].v == vid_7bit) + return i; + } + return vddc_sclk_table->count - 1; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) + return vid_mapping_table->entries[i].vid_2bit; + } + + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; + } +} + static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, u16 voltage) { @@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, u32 vid_2bit) { struct kv_power_info *pi = kv_get_pi(rdev); - u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, - 
&pi->sys_info.vid_mapping_table, - vid_2bit); + u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, + &pi->sys_info.vid_mapping_table, + vid_2bit); return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); } @@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) static int kv_unforce_levels(struct radeon_device *rdev) { - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); else return kv_set_enabled_levels(rdev); @@ -1223,7 +1269,7 @@ int kv_dpm_enable(struct radeon_device *rdev) int kv_dpm_late_enable(struct radeon_device *rdev) { - int ret; + int ret = 0; if (rdev->irq.installed && r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { @@ -1338,13 +1384,11 @@ static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable) PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); } -#if 0 static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable) { return kv_notify_message_to_smu(rdev, enable ? PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); } -#endif static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable) { @@ -1364,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) struct radeon_uvd_clock_voltage_dependency_table *table = &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; int ret; + u32 mask; if (!gate) { - if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) + if (table->count) pi->uvd_boot_level = table->count - 1; else pi->uvd_boot_level = 0; + if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { + mask = 1 << pi->uvd_boot_level; + } else { + mask = 0x1f; + } + ret = kv_copy_bytes_to_smc(rdev, pi->dpm_table_start + offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), @@ -1379,17 +1430,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) if (ret) return ret; - if (!pi->caps_uvd_dpm || - pi->caps_stable_p_state) - kv_send_msg_to_smc_with_parameter(rdev, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (1 << pi->uvd_boot_level)); + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + mask); } return kv_enable_uvd_dpm(rdev, !gate); } -#if 0 static u8 kv_get_vce_boot_level(struct radeon_device *rdev) { u8 i; @@ -1414,6 +1462,9 @@ static int kv_update_vce_dpm(struct radeon_device *rdev, int ret; if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) { + kv_dpm_powergate_vce(rdev, false); + /* turn the clocks on when encoding */ + cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1436,11 +1487,13 @@ static int kv_update_vce_dpm(struct radeon_device *rdev, kv_enable_vce_dpm(rdev, true); } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) { kv_enable_vce_dpm(rdev, false); + /* turn the clocks off when not encoding */ + cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); + kv_dpm_powergate_vce(rdev, true); } return 0; } -#endif static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) { @@ -1575,11 +1628,16 @@ static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate) pi->vce_power_gated = gate; if (gate) { - if (pi->caps_vce_pg) + if (pi->caps_vce_pg) { + /* XXX do we need a vce_v1_0_stop() ? 
*/ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF); + } } else { - if (pi->caps_vce_pg) + if (pi->caps_vce_pg) { kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON); + vce_v2_0_resume(rdev); + vce_v1_0_start(rdev); + } } } @@ -1610,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) if (pi->acp_power_gated == gate) return; - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return; pi->acp_power_gated = gate; @@ -1768,7 +1826,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); struct radeon_ps *new_ps = &pi->requested_rps; - /*struct radeon_ps *old_ps = &pi->current_rps;*/ + struct radeon_ps *old_ps = &pi->current_rps; int ret; if (pi->bapm_enable) { @@ -1779,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) } } - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { if (pi->enable_dpm) { kv_set_valid_clock_range(rdev, new_ps); kv_update_dfs_bypass_settings(rdev, new_ps); @@ -1798,14 +1856,15 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) kv_set_enabled_levels(rdev); kv_force_lowest_valid(rdev); kv_unforce_levels(rdev); -#if 0 + ret = kv_update_vce_dpm(rdev, new_ps, old_ps); if (ret) { DRM_ERROR("kv_update_vce_dpm failed\n"); return ret; } -#endif kv_update_sclk_t(rdev); + if (rdev->family == CHIP_MULLINS) + kv_enable_nb_dpm(rdev); } } else { if (pi->enable_dpm) { @@ -1823,13 +1882,11 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) kv_program_nbps_index_settings(rdev, new_ps); kv_freeze_sclk_dpm(rdev, false); kv_set_enabled_levels(rdev); -#if 0 ret = kv_update_vce_dpm(rdev, new_ps, old_ps); if (ret) { DRM_ERROR("kv_update_vce_dpm failed\n"); return ret; } -#endif kv_update_acp_boot_level(rdev); kv_update_sclk_t(rdev); kv_enable_nb_dpm(rdev); @@ -1858,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { kv_force_lowest_valid(rdev); kv_init_graphics_levels(rdev); kv_program_bootup_state(rdev); @@ -1897,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev, static void kv_patch_voltage_values(struct radeon_device *rdev) { int i; - struct radeon_uvd_clock_voltage_dependency_table *table = + struct radeon_uvd_clock_voltage_dependency_table *uvd_table = &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct radeon_vce_clock_voltage_dependency_table *vce_table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table *samu_table = + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table *acp_table = + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + if (uvd_table->count) { + for (i = 0; i < uvd_table->count; i++) + uvd_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + uvd_table->entries[i].v); + } + + if (vce_table->count) { + for (i = 0; i < vce_table->count; i++) + vce_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + vce_table->entries[i].v); + } + + if (samu_table->count) { + for (i = 0; i < samu_table->count; i++) + samu_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + samu_table->entries[i].v); + } - if (table->count) { - for (i = 0; i < table->count; i++) - table->entries[i].v = + if 
(acp_table->count) { + for (i = 0; i < acp_table->count; i++) + acp_table->entries[i].v = kv_convert_8bit_index_to_voltage(rdev, - table->entries[i].v); + acp_table->entries[i].v); } } @@ -1937,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev) break; } - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); else return kv_set_enabled_level(rdev, i); @@ -1957,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) break; } - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); else return kv_set_enabled_level(rdev, i); @@ -2037,6 +2121,14 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_clock_and_voltage_limits *max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + if (new_rps->vce_active) { + new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; + new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; + } else { + new_rps->evclk = 0; + new_rps->ecclk = 0; + } + mclk = max_limits->mclk; sclk = min_sclk; @@ -2056,6 +2148,11 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, sclk = stable_p_state_sclk; } + if (new_rps->vce_active) { + if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) + sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; + } + ps->need_dfs_bypass = true; for (i = 0; i < ps->num_levels; i++) { @@ -2092,7 +2189,8 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, } } - pi->video_start = new_rps->dclk || new_rps->vclk; + pi->video_start = new_rps->dclk || new_rps->vclk || + new_rps->evclk || new_rps->ecclk; if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) @@ -2100,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, else pi->battery_state = false; - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { ps->dpm0_pg_nb_ps_lo = 0x1; ps->dpm0_pg_nb_ps_hi = 0x0; ps->dpmx_nb_ps_lo = 0x1; @@ -2161,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) if (pi->lowest_valid > pi->highest_valid) return -EINVAL; - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { pi->graphics_level[i].GnbSlow = 1; pi->graphics_level[i].ForceNbPs1 = 0; @@ -2235,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev) break; kv_set_divider_value(rdev, i, table->entries[i].clk); - vid_2bit = sumo_convert_vid7_to_vid2(rdev, - &pi->sys_info.vid_mapping_table, - table->entries[i].v); + vid_2bit = kv_convert_vid7_to_vid2(rdev, + &pi->sys_info.vid_mapping_table, + table->entries[i].v); kv_set_vid(rdev, i, vid_2bit); kv_set_at(rdev, i, pi->at[i]); kv_dpm_power_level_enabled_for_throttle(rdev, i, true); @@ -2306,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev, struct kv_power_info *pi = kv_get_pi(rdev); u32 nbdpmconfig1; - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return; if (pi->sys_info.nb_dpm_enable) { @@ -2538,9 +2636,6 @@ static int kv_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return 
-ENOMEM; power_state_offset = (u8 *)state_array->states; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -2577,6 +2672,19 @@ static int kv_parse_power_table(struct radeon_device *rdev) power_state_offset += 2 + power_state->v2.ucNumDPMLevels; } rdev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { + u32 sclk; + clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + rdev->pm.dpm.vce_states[i].sclk = sclk; + rdev->pm.dpm.vce_states[i].mclk = 0; + } + return 0; } @@ -2590,6 +2698,10 @@ int kv_dpm_init(struct radeon_device *rdev) return -ENOMEM; rdev->pm.dpm.priv = pi; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = r600_parse_extended_power_table(rdev); if (ret) return ret; @@ -2599,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev) pi->sram_end = SMC_RAM_END; - if (rdev->family == CHIP_KABINI) - pi->high_voltage_t = 4001; - pi->enable_nb_dpm = true; pi->caps_power_containment = true; @@ -2617,13 +2726,13 @@ int kv_dpm_init(struct radeon_device *rdev) pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - pi->bapm_enable = false; + pi->bapm_enable = true; pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? 
*/ pi->caps_uvd_pg = true; pi->caps_uvd_dpm = true; - pi->caps_vce_pg = false; + pi->caps_vce_pg = false; /* XXX true */ pi->caps_samu_pg = false; pi->caps_acp_pg = false; pi->caps_stable_p_state = false; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ea932ac66fc..5a33ca68186 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1057,6 +1057,18 @@ static void cayman_gpu_init(struct radeon_device *rdev) disabled_rb_mask &= ~(1 << i); } + for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) { + u32 simd_disable_bitmap; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16; + simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se; + tmp <<= 16; + tmp |= simd_disable_bitmap; + } + rdev->config.cayman.active_simds = hweight32(~tmp); + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); @@ -1228,12 +1240,14 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | + ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | + BANK_SELECT(6) | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); @@ -1266,6 +1280,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | @@ -1343,7 +1358,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev, /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); @@ -1642,8 +1657,8 @@ static int cayman_cp_resume(struct radeon_device *rdev) ring = &rdev->ring[ridx[i]]; WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); - ring->rptr = ring->wptr = 0; - WREG32(cp_rb_rptr[i], ring->rptr); + ring->wptr = 0; + WREG32(cp_rb_rptr[i], 0); WREG32(cp_rb_wptr[i], ring->wptr); mdelay(1); @@ -1917,11 +1932,9 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -2105,7 +2118,8 @@ int cayman_resume(struct radeon_device *rdev) /* init golden registers */ ni_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if 
(rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cayman_startup(rdev); diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 7cf96b15377..6378e027669 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -248,8 +248,6 @@ int cayman_dma_resume(struct radeon_device *rdev) ring->wptr = 0; WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); - ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; - WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); ring->ready = true; @@ -302,11 +300,9 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) mask = RADEON_RESET_DMA1; if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index c351226ecb3..01fc4888e6f 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } } @@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev, if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct ni_ps *ps = ni_get_ps(rps); - u16 vddc; struct rv7xx_pl *pl = &ps->performance_levels[index]; ps->performance_level_count = index + 1; @@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, /* patch up vddc if necessary */ if (pl->vddc == 0xff01) { - if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) - pl->vddc = vddc; + if (pi->max_vddc) + pl->vddc = pi->max_vddc; } if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { @@ -4026,9 +4025,6 @@ static int ni_parse_power_table(struct radeon_device *rdev) power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.dpm.ps) return -ENOMEM; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < power_info->pplib.ucNumStates; i++) { power_state = (union pplib_power_state *) @@ -4090,6 +4086,10 @@ int ni_dpm_init(struct radeon_device *rdev) pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = ni_parse_power_table(rdev); if (ret) return ret; @@ -4322,7 +4322,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev, void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + 
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; struct ni_ps *ps = ni_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index d996033c243..2e12e4d6925 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -128,6 +128,7 @@ #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) +#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24) #define VM_CONTEXT1_CNTL 0x1414 #define VM_CONTEXT0_CNTL2 0x1430 #define VM_CONTEXT1_CNTL2 0x1434 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ef024ce3f7c..1544efcf1c3 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -142,36 +142,6 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) } /** - * r100_pre_page_flip - pre-pageflip callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to prepare for pageflip on - * - * Pre-pageflip callback (r1xx-r4xx). - * Enables the pageflip irq (vblank irq). - */ -void r100_pre_page_flip(struct radeon_device *rdev, int crtc) -{ - /* enable the pflip int */ - radeon_irq_kms_pflip_irq_get(rdev, crtc); -} - -/** - * r100_post_page_flip - pos-pageflip callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to cleanup pageflip on - * - * Post-pageflip callback (r1xx-r4xx). - * Disables the pageflip irq (vblank irq). - */ -void r100_post_page_flip(struct radeon_device *rdev, int crtc) -{ - /* disable the pflip int */ - radeon_irq_kms_pflip_irq_put(rdev, crtc); -} - -/** * r100_page_flip - pageflip callback. * * @rdev: radeon_device pointer @@ -182,9 +152,8 @@ void r100_post_page_flip(struct radeon_device *rdev, int crtc) * During vblank we take the crtc lock and wait for the update_pending * bit to go high, when it does, we release the lock, and allow the * double buffered update to take place. - * Returns the current update pending status. */ -u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; @@ -206,8 +175,24 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); +} + +/** + * r100_page_flip_pending - check if page flip is still pending + * + * @rdev: radeon_device pointer + * @crtc_id: crtc to check + * + * Check if the last pageflip is still pending (r1xx-r4xx). + * Returns the current update pending status. 
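+ * + * A minimal consumer sketch (an illustration, not part of this patch), assuming an asic dispatch wrapper along the lines of radeon_page_flip_pending() (the wrapper name is an assumption here): a flip worker can poll this until the hardware has latched the new base address, e.g. + * + * while (radeon_page_flip_pending(rdev, crtc_id)) + * usleep_range(1000, 2000);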
+ */ +bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + /* Return current update_pending status: */ - return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; + return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & + RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET); } /** @@ -697,15 +682,11 @@ void r100_pci_gart_disable(struct radeon_device *rdev) WREG32(RADEON_AIC_HI_ADDR, 0); } -int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) +void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr) { u32 *gtt = rdev->gart.ptr; - - if (i < 0 || i > rdev->gart.num_gpu_pages) { - return -EINVAL; - } gtt[i] = cpu_to_le32(lower_32_bits(addr)); - return 0; } void r100_pci_gart_fini(struct radeon_device *rdev) @@ -794,7 +775,7 @@ int r100_irq_process(struct radeon_device *rdev) wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); } if (status & RADEON_CRTC2_VBLANK_STAT) { if (rdev->irq.crtc_vblank_int[1]) { @@ -803,7 +784,7 @@ int r100_irq_process(struct radeon_device *rdev) wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); } if (status & RADEON_FP_DETECT_STAT) { queue_hotplug = true; @@ -1193,7 +1174,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) WREG32(RADEON_CP_RB_CNTL, tmp); udelay(10); - ring->rptr = RREG32(RADEON_CP_RB_RPTR); /* Set cp mode to bus mastering & enable cp*/ WREG32(RADEON_CP_CSQ_MODE, REG_SET(RADEON_INDIRECT2_START, indirect2_start) | @@ -1275,12 +1255,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, value = radeon_get_ib_value(p, idx); tmp = value & 0x003fffff; - tmp += (((u32)reloc->lobj.gpu_offset) >> 10); + tmp += (((u32)reloc->gpu_offset) >> 10); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= RADEON_DST_TILE_MACRO; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reloc->tiling_flags & RADEON_TILING_MICRO) { if (reg == RADEON_SRC_PITCH_OFFSET) { DRM_ERROR("Cannot src blit from microtiled surface\n"); radeon_cs_dump_packet(p, pkt); @@ -1326,7 +1306,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, return r; } idx_value = radeon_get_ib_value(p, idx); - ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); + ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); track->arrays[i + 0].esize = idx_value >> 8; track->arrays[i + 0].robj = reloc->robj; @@ -1338,7 +1318,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); + ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset); track->arrays[i + 1].robj = reloc->robj; track->arrays[i + 1].esize = idx_value >> 24; track->arrays[i + 1].esize &= 0x7F; @@ -1352,7 +1332,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, return r; } idx_value = radeon_get_ib_value(p, idx); - ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); + ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); track->arrays[i + 0].robj = reloc->robj; track->arrays[i + 0].esize = idx_value 
>> 8; track->arrays[i + 0].esize &= 0x7F; @@ -1595,7 +1575,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->zb.robj = reloc->robj; track->zb.offset = idx_value; track->zb_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case RADEON_RB3D_COLOROFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); @@ -1608,7 +1588,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->cb[0].robj = reloc->robj; track->cb[0].offset = idx_value; track->cb_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case RADEON_PP_TXOFFSET_0: case RADEON_PP_TXOFFSET_1: @@ -1622,16 +1602,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return r; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= RADEON_TXO_MACRO_TILE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= RADEON_TXO_MICRO_TILE_X2; tmp = idx_value & ~(0x7 << 2); tmp |= tile_flags; - ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); + ib[idx] = tmp + ((u32)reloc->gpu_offset); } else - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); track->textures[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -1649,7 +1629,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return r; } track->textures[0].cube_info[i].offset = idx_value; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); track->textures[0].cube_info[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -1667,7 +1647,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return r; } track->textures[1].cube_info[i].offset = idx_value; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); track->textures[1].cube_info[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -1685,7 +1665,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return r; } track->textures[2].cube_info[i].offset = idx_value; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); track->textures[2].cube_info[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -1703,9 +1683,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return r; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= RADEON_COLOR_TILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tmp = idx_value & ~(0x7 << 16); @@ -1773,7 +1753,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case RADEON_PP_CNTL: { @@ -1933,7 +1913,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); + ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); if (r) { 
return r; @@ -1947,7 +1927,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); + ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset); track->num_arrays = 1; track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); @@ -2523,11 +2503,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) rbbm_status = RREG32(R_000E40_RBBM_STATUS); if (!G_000E40_GUI_ACTIVE(rbbm_status)) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -3223,12 +3201,12 @@ void r100_bandwidth_update(struct radeon_device *rdev) if (rdev->mode_info.crtcs[0]->base.enabled) { mode1 = &rdev->mode_info.crtcs[0]->base.mode; - pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; + pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8; } if (!(rdev->flags & RADEON_SINGLE_CRTC)) { if (rdev->mode_info.crtcs[1]->base.enabled) { mode2 = &rdev->mode_info.crtcs[1]->base.mode; - pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8; } } @@ -3942,8 +3920,6 @@ int r100_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r100_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index b3807edb193..58f0473aa73 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->zb.robj = reloc->robj; track->zb.offset = idx_value; track->zb_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case RADEON_RB3D_COLOROFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); @@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->cb[0].robj = reloc->robj; track->cb[0].offset = idx_value; track->cb_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case R200_PP_TXOFFSET_0: case R200_PP_TXOFFSET_1: @@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, return r; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= R200_TXO_MACRO_TILE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= R200_TXO_MICRO_TILE; tmp = idx_value & ~(0x7 << 2); tmp |= tile_flags; - ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); + ib[idx] = tmp + ((u32)reloc->gpu_offset); } else - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); track->textures[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, return r; } track->textures[i].cube_info[face - 1].offset = idx_value; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); track->textures[i].cube_info[face - 1].robj = reloc->robj; track->tex_dirty = true; break; @@ -287,9 +287,9 @@ int 
r200_packet0_check(struct radeon_cs_parser *p, } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= RADEON_COLOR_TILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tmp = idx_value & ~(0x7 << 16); @@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case RADEON_PP_CNTL: { diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 7c63ef840e8..3c21d77a483 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -72,13 +72,11 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) -int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) +void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr) { void __iomem *ptr = rdev->gart.ptr; - if (i < 0 || i > rdev->gart.num_gpu_pages) { - return -EINVAL; - } addr = (lower_32_bits(addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24) | R300_PTE_WRITEABLE | R300_PTE_READABLE; @@ -86,7 +84,6 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) * on powerpc without HW swappers, it'll get swapped on way * into VRAM - so no need for cpu_to_le32 on VRAM tables */ writel(addr, ((void __iomem *)ptr) + (i * 4)); - return 0; } int rv370_pcie_gart_init(struct radeon_device *rdev) @@ -640,7 +637,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->cb[i].robj = reloc->robj; track->cb[i].offset = idx_value; track->cb_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case R300_ZB_DEPTHOFFSET: r = radeon_cs_packet_next_reloc(p, &reloc, 0); @@ -653,7 +650,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->zb.robj = reloc->robj; track->zb.offset = idx_value; track->zb_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case R300_TX_OFFSET_0: case R300_TX_OFFSET_0+4: @@ -682,16 +679,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ - ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); + ((idx_value & ~31) + (u32)reloc->gpu_offset); } else { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_TXO_MACRO_TILE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_TXO_MICRO_TILE; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) tile_flags |= R300_TXO_MICRO_TILE_SQUARE; - tmp = idx_value + ((u32)reloc->lobj.gpu_offset); + tmp = idx_value + ((u32)reloc->gpu_offset); tmp |= tile_flags; ib[idx] = tmp; } @@ -753,11 +750,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return r; } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_COLOR_TILE_ENABLE; - if (reloc->lobj.tiling_flags & 
RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_COLOR_MICROTILE_ENABLE; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; tmp = idx_value & ~(0x7 << 16); @@ -838,11 +835,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return r; } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_DEPTHMACROTILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + if (reloc->tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_DEPTHMICROTILE_TILED; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; tmp = idx_value & ~(0x7 << 16); @@ -1052,7 +1049,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case 0x4e0c: /* RB3D_COLOR_CHANNEL_MASK */ @@ -1097,7 +1094,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->aa.robj = reloc->robj; track->aa.offset = idx_value; track->aa_dirty = true; - ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case R300_RB3D_AARESOLVE_PITCH: track->aa.pitch = idx_value & 0x3FFE; @@ -1162,7 +1159,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, radeon_cs_dump_packet(p, pkt); return r; } - ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); + ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); if (r) { return r; @@ -1430,8 +1427,6 @@ int r300_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r300_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3768aab2710..802b19220a2 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r420_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 1dd0d32993d..136b7bc7cd2 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -402,6 +402,7 @@ * block and vice versa. This applies to GRPH, CUR, etc. 
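 * * A hedged sketch (not from this patch) of how the AVIVO_LUT_10BIT_BYPASS_EN bit defined below might be driven when a CRTC scans out a deep color (bpc > 8) mode; the surrounding logic is an assumption for illustration only: * * u32 tmp = RREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset); * if (radeon_crtc->bpc > 8) * tmp |= AVIVO_LUT_10BIT_BYPASS_EN; /* bypass the 8-bit legacy LUT */ * else * tmp &= ~AVIVO_LUT_10BIT_BYPASS_EN; * WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, tmp);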
*/ #define AVIVO_D1GRPH_LUT_SEL 0x6108 +# define AVIVO_LUT_10BIT_BYPASS_EN (1 << 8) #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 #define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 #define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e209eb75024..98d6053c36c 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r520_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 56140b4e5bb..3c69f58e46e 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1748,11 +1748,9 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -1960,6 +1958,9 @@ static void r600_gpu_init(struct radeon_device *rdev) if (tmp < rdev->config.r600.max_simds) { rdev->config.r600.max_simds = tmp; } + tmp = rdev->config.r600.max_simds - + r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); + rdev->config.r600.active_simds = tmp; disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; @@ -2604,8 +2605,6 @@ int r600_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_BASE, ring->gpu_addr >> 8); WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); - ring->rptr = RREG32(CP_RB_RPTR); - r600_cp_start(rdev); ring->ready = true; r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); @@ -2728,7 +2727,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev, /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); @@ -2767,7 +2766,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, sel |= PACKET3_SEM_WAIT_ON_SIGNAL; radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); return true; @@ -2828,9 +2827,9 @@ int r600_copy_cpdma(struct radeon_device *rdev, if (size_in_bytes == 0) tmp |= PACKET3_CP_DMA_CP_SYNC; radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); - radeon_ring_write(ring, src_offset & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(src_offset)); radeon_ring_write(ring, tmp); - radeon_ring_write(ring, dst_offset & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(dst_offset)); radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); radeon_ring_write(ring, cur_size_in_bytes); src_offset += cur_size_in_bytes; @@ -2843,6 +2842,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + 
radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev) /* post card */ atom_asic_init(rdev->mode_info.atom_context); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = r600_startup(rdev); @@ -3508,7 +3509,6 @@ int r600_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 hdmi0, hdmi1; - u32 d1grph = 0, d2grph = 0; u32 dma_cntl; u32 thermal_int = 0; @@ -3617,8 +3617,8 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DMA_CNTL, dma_cntl); WREG32(DxMODE_INT_MASK, mode_int); - WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); - WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); + WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); + WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); WREG32(GRBM_INT_CNTL, grbm_int_cntl); if (ASIC_IS_DCE3(rdev)) { WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -3879,7 +3880,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -3905,7 +3906,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } @@ -3921,6 +3922,14 @@ restart_ih: break; } break; + case 9: /* D1 pflip */ + DRM_DEBUG("IH: D1 flip\n"); + radeon_crtc_handle_flip(rdev, 0); + break; + case 11: /* D2 pflip */ + DRM_DEBUG("IH: D2 flip\n"); + radeon_crtc_handle_flip(rdev, 1); + break; case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: @@ -3991,6 +4000,10 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ case 178: /* CP_INT in IB2 */ diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 47fc2b88697..bffac10c429 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work) } /* enable the audio stream */ -static void r600_audio_enable(struct radeon_device *rdev, - struct r600_audio_pin *pin, - bool enable) +void r600_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable) { u32 value = 0; + if (!pin) + return; + if (ASIC_IS_DCE4(rdev)) { if (enable) { value |= 0x81000000; /* Required to enable audio */ @@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev, WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000); } - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); } /* @@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev) rdev->audio.pin[0].status_bits = 0; rdev->audio.pin[0].category_code = 0; rdev->audio.pin[0].id = 0; - - r600_audio_enable(rdev, &rdev->audio.pin[0], true); + /* disable audio. 
it will be set up later */ + r600_audio_enable(rdev, &rdev->audio.pin[0], false); return 0; } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 7b399dc5fd5..12511bb5fd6 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_008C64_SQ_VSTMP_RING_SIZE: case R_0288C8_SQ_GS_VERT_ITEMSIZE: /* get value to populate the IB don't remove */ - tmp =radeon_get_ib_value(p, idx); - ib[idx] = 0; + /*tmp =radeon_get_ib_value(p, idx); + ib[idx] = 0;*/ + break; + case SQ_ESGS_RING_BASE: + case SQ_GSVS_RING_BASE: + case SQ_ESTMP_RING_BASE: + case SQ_GSTMP_RING_BASE: + case SQ_PSTMP_RING_BASE: + case SQ_VSTMP_RING_BASE: + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SQ_CONFIG: track->sq_config = radeon_get_ib_value(p, idx); @@ -1029,7 +1043,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->db_depth_info = radeon_get_ib_value(p, idx); ib[idx] &= C_028010_ARRAY_MODE; track->db_depth_info &= C_028010_ARRAY_MODE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + if (reloc->tiling_flags & RADEON_TILING_MACRO) { ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); } else { @@ -1070,9 +1084,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->vgt_strmout_bo[tmp] = reloc->robj; - track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; + track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset; track->streamout_dirty = true; break; case VGT_STRMOUT_BUFFER_SIZE_0: @@ -1091,7 +1105,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case R_028238_CB_TARGET_MASK: track->cb_target_mask = radeon_get_ib_value(p, idx); @@ -1128,10 +1142,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + if (reloc->tiling_flags & RADEON_TILING_MACRO) { ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + } else if (reloc->tiling_flags & RADEON_TILING_MICRO) { ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); } @@ -1200,7 +1214,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } track->cb_color_frag_bo[tmp] = reloc->robj; track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = 
true; @@ -1231,7 +1245,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } track->cb_color_tile_bo[tmp] = reloc->robj; track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; @@ -1267,10 +1281,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } tmp = (reg - CB_COLOR0_BASE) / 4; track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_base_last[tmp] = ib[idx]; track->cb_color_bo[tmp] = reloc->robj; - track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; + track->cb_color_bo_mc[tmp] = reloc->gpu_offset; track->cb_dirty = true; break; case DB_DEPTH_BASE: @@ -1281,9 +1295,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->db_offset = radeon_get_ib_value(p, idx) << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->db_bo = reloc->robj; - track->db_bo_mc = reloc->lobj.gpu_offset; + track->db_bo_mc = reloc->gpu_offset; track->db_dirty = true; break; case DB_HTILE_DATA_BASE: @@ -1294,7 +1308,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx) << 8; - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->htile_bo = reloc->robj; track->db_dirty = true; break; @@ -1363,7 +1377,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SX_MEMORY_EXPORT_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm); @@ -1372,7 +1386,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SX_MISC: track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; @@ -1658,7 +1672,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (idx_value & 0xfffffff0) + ((u64)(tmp & 0xff) << 32); @@ -1699,7 +1713,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + idx_value + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); @@ -1751,7 +1765,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff0) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -1791,7 +1805,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, tmp = radeon_get_ib_value(p, idx) + ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); - offset = reloc->lobj.gpu_offset + tmp; + offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", @@ 
-1821,7 +1835,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, tmp = radeon_get_ib_value(p, idx+2) + ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32); - offset = reloc->lobj.gpu_offset + tmp; + offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", @@ -1847,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad SURFACE_SYNC\n"); return -EINVAL; } - ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_EVENT_WRITE: @@ -1863,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad EVENT_WRITE\n"); return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffff8) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -1885,7 +1899,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = reloc->lobj.gpu_offset + + offset = reloc->gpu_offset + (radeon_get_ib_value(p, idx+1) & 0xfffffffc) + ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); @@ -1950,11 +1964,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } - base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + if (reloc->tiling_flags & RADEON_TILING_MACRO) ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + else if (reloc->tiling_flags & RADEON_TILING_MICRO) ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } texture = reloc->robj; @@ -1964,13 +1978,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } - mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); mipmap = reloc->robj; r = r600_check_texture_resource(p, idx+(i*7)+1, texture, mipmap, base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), - reloc->lobj.tiling_flags); + reloc->tiling_flags); if (r) return r; ib[idx+1+(i*7)+2] += base_offset; @@ -1994,7 +2008,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; } - offset64 = reloc->lobj.gpu_offset + offset; + offset64 = reloc->gpu_offset + offset; ib[idx+1+(i*8)+0] = offset64; ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | (upper_32_bits(offset64) & 0xff); @@ -2104,7 +2118,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); } break; case PACKET3_SURFACE_BASE_UPDATE: @@ -2137,7 +2151,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } @@ -2156,7 +2170,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += 
reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } @@ -2185,7 +2199,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+0] = offset; ib[idx+1] = upper_32_bits(offset) & 0xff; break; @@ -2210,7 +2224,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+1] = offset; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { @@ -2234,7 +2248,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } - offset += reloc->lobj.gpu_offset; + offset += reloc->gpu_offset; ib[idx+3] = offset; ib[idx+4] = upper_32_bits(offset) & 0xff; } else { @@ -2491,14 +2505,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); p->idx += count + 5; } else { dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; p->idx += count + 3; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { @@ -2525,22 +2539,22 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) /* tiled src, linear dst */ src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; - ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); dst_offset = radeon_get_ib_value(p, idx+5); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; - ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; } else { /* linear src, tiled dst */ src_offset = radeon_get_ib_value(p, idx+5); src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; - ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff; dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); + ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } p->idx += 7; } else { @@ -2550,10 +2564,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; - ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; + ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 
0xff; p->idx += 5; } else { src_offset = radeon_get_ib_value(p, idx+2); @@ -2561,10 +2575,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; - ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); + ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff; + ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16; p->idx += 4; } } @@ -2596,8 +2610,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000; + ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); + ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000; p->idx += 4; break; case DMA_PACKET_NOP: diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index b2d4c91e627..4969cef44a1 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -176,8 +176,6 @@ int r600_dma_resume(struct radeon_device *rdev) ring->wptr = 0; WREG32(DMA_RB_WPTR, ring->wptr << 2); - ring->rptr = RREG32(DMA_RB_RPTR) >> 2; - WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); ring->ready = true; @@ -221,11 +219,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) u32 reset_mask = r600_gpu_check_soft_reset(rdev); if (!(reset_mask & RADEON_RESET_DMA)) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -493,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e4cc9b314ce..9c61b74ef44 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev) u32 line_time_us, vblank_lines; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / - radeon_crtc->hw_mode.clock; - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - - radeon_crtc->hw_mode.crtc_vdisplay + - (radeon_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; - break; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { + line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / + radeon_crtc->hw_mode.clock; + vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - + 
radeon_crtc->hw_mode.crtc_vdisplay + + (radeon_crtc->v_border * 2); + vblank_time_us = vblank_lines * line_time_us; + break; + } } } @@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) struct radeon_crtc *radeon_crtc; u32 vrefresh = 0; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - vrefresh = radeon_crtc->hw_mode.vrefresh; - break; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { + vrefresh = radeon_crtc->hw_mode.vrefresh; + break; + } } } - return vrefresh; } @@ -834,6 +837,26 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen return 0; } +int r600_get_platform_caps(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); + rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); + rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); + + return 0; +} + /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 @@ -1043,7 +1066,15 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 1 + array->ucNumEntries * sizeof(VCEClockInfo)); + ATOM_PPLIB_VCE_State_Table *states = + (ATOM_PPLIB_VCE_State_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + + 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + + 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; + ATOM_PPLIB_VCE_State_Record *state_entry; + VCEClockInfo *vce_clk; u32 size = limits->numEntries * sizeof(struct radeon_vce_clock_voltage_dependency_entry); rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = @@ -1055,8 +1086,9 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = limits->numEntries; entry = &limits->entries[0]; + state_entry = &states->entries[0]; for (i = 0; i < limits->numEntries; i++) { - VCEClockInfo *vce_clk = (VCEClockInfo *) + vce_clk = (VCEClockInfo *) ((u8 *)&array->entries[0] + (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = @@ -1068,6 +1100,23 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); } + for (i = 0; i < states->numEntries; i++) { + if (i >= RADEON_MAX_VCE_LEVELS) + break; + vce_clk = (VCEClockInfo *) + ((u8 *)&array->entries[0] + + (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); + 
rdev->pm.dpm.vce_states[i].evclk = + le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); + rdev->pm.dpm.vce_states[i].ecclk = + le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); + rdev->pm.dpm.vce_states[i].clk_idx = + state_entry->ucClockInfoIndex & 0x3f; + rdev->pm.dpm.vce_states[i].pstate = + (state_entry->ucClockInfoIndex & 0xc0) >> 6; + state_entry = (ATOM_PPLIB_VCE_State_Record *) + ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); + } } if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && ext_hdr->usUVDTableOffset) { diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h index 07eab2b04e8..46b9d2a0301 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.h +++ b/drivers/gpu/drm/radeon/r600_dpm.h @@ -215,6 +215,8 @@ void r600_stop_dpm(struct radeon_device *rdev); bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor); +int r600_get_platform_caps(struct radeon_device *rdev); + int r600_parse_extended_power_table(struct radeon_device *rdev); void r600_free_extended_power_table(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 3016fc14f50..26ef8ced6f8 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -133,7 +133,7 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock) /* * update the N and CTS parameters for a given pixel clock rate */ -static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) +void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -142,21 +142,33 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t offset = dig->afmt->offset; - WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz)); - WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz); - - WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz)); - WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz); - - WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz)); - WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz); + WREG32_P(HDMI0_ACR_32_0 + offset, + HDMI0_ACR_CTS_32(acr.cts_32khz), + ~HDMI0_ACR_CTS_32_MASK); + WREG32_P(HDMI0_ACR_32_1 + offset, + HDMI0_ACR_N_32(acr.n_32khz), + ~HDMI0_ACR_N_32_MASK); + + WREG32_P(HDMI0_ACR_44_0 + offset, + HDMI0_ACR_CTS_44(acr.cts_44_1khz), + ~HDMI0_ACR_CTS_44_MASK); + WREG32_P(HDMI0_ACR_44_1 + offset, + HDMI0_ACR_N_44(acr.n_44_1khz), + ~HDMI0_ACR_N_44_MASK); + + WREG32_P(HDMI0_ACR_48_0 + offset, + HDMI0_ACR_CTS_48(acr.cts_48khz), + ~HDMI0_ACR_CTS_48_MASK); + WREG32_P(HDMI0_ACR_48_1 + offset, + HDMI0_ACR_N_48(acr.n_48khz), + ~HDMI0_ACR_N_48_MASK); } /* * build a HDMI Video Info Frame */ -static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, - void *buffer, size_t size) +void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, + size_t size) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -231,7 +243,7 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder) /* * write the audio workaround status to the hardware */ -static void r600_hdmi_audio_workaround(struct drm_encoder *encoder) +void r600_hdmi_audio_workaround(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -250,7 +262,7 @@ 
static void r600_hdmi_audio_workaround(struct drm_encoder *encoder) value, ~HDMI0_AUDIO_TEST_EN); } -static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) +void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -320,124 +332,6 @@ static void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) } } -static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) -{ - struct radeon_device *rdev = encoder->dev->dev_private; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; - u32 tmp; - u8 *sadb; - int sad_count; - - /* XXX: setting this register causes hangs on some asics */ - return; - - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count < 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - return; - } - - /* program the speaker allocation */ - tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); - tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); - /* set HDMI mode */ - tmp |= HDMI_CONNECTION; - if (sad_count) - tmp |= SPEAKER_ALLOCATION(sadb[0]); - else - tmp |= SPEAKER_ALLOCATION(5); /* stereo */ - WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); - - kfree(sadb); -} - -static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) -{ - struct radeon_device *rdev = encoder->dev->dev_private; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; - struct cea_sad *sads; - int i, sad_count; - - static const u16 eld_reg_to_type[][2] = { - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, - { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, - }; - - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return; - } - - sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { - DRM_ERROR("Couldn't read SADs: %d\n", sad_count); - return; - } - BUG_ON(!sads); - - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { - u32 value = 0; - u8 stereo_freqs = 0; - int max_channels = -1; - int j; - - for (j = 0; j < sad_count; j++) { - struct cea_sad *sad = 
&sads[j]; - - if (sad->format == eld_reg_to_type[i][1]) { - if (sad->channels > max_channels) { - value = MAX_CHANNELS(sad->channels) | - DESCRIPTOR_BYTE_2(sad->byte2) | - SUPPORTED_FREQUENCIES(sad->freq); - max_channels = sad->channels; - } - - if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) - stereo_freqs |= sad->freq; - else - break; - } - } - - value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); - - WREG32(eld_reg_to_type[i][0], value); - } - - kfree(sads); -} - /* * update the info frames with the data from the current display mode */ @@ -450,6 +344,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; struct hdmi_avi_infoframe frame; uint32_t offset; + uint32_t acr_ctl; ssize_t err; if (!dig || !dig->afmt) @@ -460,54 +355,50 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod return; offset = dig->afmt->offset; - r600_audio_set_dto(encoder, mode->clock); + /* disable audio prior to setting up hw */ + dig->afmt->pin = r600_audio_get_pin(rdev); + r600_audio_enable(rdev, dig->afmt->pin, false); - WREG32(HDMI0_VBI_PACKET_CONTROL + offset, - HDMI0_NULL_SEND); /* send null packets when required */ - - WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000); - - if (ASIC_IS_DCE32(rdev)) { - WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, - HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ - HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ - WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, - AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */ - AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ - } else { - WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, - HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ - HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ - HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ - HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ - } - - if (ASIC_IS_DCE32(rdev)) { - dce3_2_afmt_write_speaker_allocation(encoder); - dce3_2_afmt_write_sad_regs(encoder); - } - - WREG32(HDMI0_ACR_PACKET_CONTROL + offset, - HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ - HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ - - WREG32(HDMI0_VBI_PACKET_CONTROL + offset, - HDMI0_NULL_SEND | /* send null packets when required */ - HDMI0_GC_SEND | /* send general control packets */ - HDMI0_GC_CONT); /* send general control packets every frame */ - - /* TODO: HDMI0_AUDIO_INFO_UPDATE */ - WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, - HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ - HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ - HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ - HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */ - - WREG32(HDMI0_INFOFRAME_CONTROL1 + offset, - HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ - HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */ + r600_audio_set_dto(encoder, mode->clock); - WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */ + WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, + HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ + HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ + HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ + 
HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */ + ~(HDMI0_AUDIO_SAMPLE_SEND | + HDMI0_AUDIO_DELAY_EN_MASK | + HDMI0_AUDIO_PACKETS_PER_LINE_MASK | + HDMI0_60958_CS_UPDATE)); + + /* DCE 3.0 uses register that's normally for CRC_CONTROL */ + acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL : + HDMI0_ACR_PACKET_CONTROL; + WREG32_P(acr_ctl + offset, + HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ + HDMI0_ACR_AUTO_SEND, /* allow hw to send ACR packets when required */ + ~(HDMI0_ACR_SOURCE | + HDMI0_ACR_AUTO_SEND)); + + WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset, + HDMI0_NULL_SEND | /* send null packets when required */ + HDMI0_GC_SEND | /* send general control packets */ + HDMI0_GC_CONT); /* send general control packets every frame */ + + WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, + HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ + HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ + HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ + + WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset, + HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ + HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */ + ~(HDMI0_AVI_INFO_LINE_MASK | + HDMI0_AUDIO_INFO_LINE_MASK)); + + WREG32_AND(HDMI0_GC + offset, + ~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { @@ -522,19 +413,45 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod } r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); + + /* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */ + + WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset, + ~(HDMI0_GENERIC0_SEND | + HDMI0_GENERIC0_CONT | + HDMI0_GENERIC0_UPDATE | + HDMI0_GENERIC1_SEND | + HDMI0_GENERIC1_CONT | + HDMI0_GENERIC0_LINE_MASK | + HDMI0_GENERIC1_LINE_MASK)); + r600_hdmi_update_ACR(encoder, mode->clock); + WREG32_P(HDMI0_60958_0 + offset, + HDMI0_60958_CS_CHANNEL_NUMBER_L(1), + ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK | + HDMI0_60958_CS_CLOCK_ACCURACY_MASK)); + + WREG32_P(HDMI0_60958_1 + offset, + HDMI0_60958_CS_CHANNEL_NUMBER_R(2), + ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK); + /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF); WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); - r600_hdmi_audio_workaround(encoder); + /* enable audio after setting up hw */ + r600_audio_enable(rdev, dig->afmt->pin, true); } -/* - * update settings with current parameters from audio engine +/** + * r600_hdmi_update_audio_settings - Update audio infoframe + * + * @encoder: drm encoder + * + * Gets info about current audio stream and updates audio infoframe. 
*/ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) { @@ -546,7 +463,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE]; struct hdmi_audio_infoframe frame; uint32_t offset; - uint32_t iec; + uint32_t value; ssize_t err; if (!dig->afmt || !dig->afmt->enabled) @@ -559,60 +476,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n", (int)audio.status_bits, (int)audio.category_code); - iec = 0; - if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL) - iec |= 1 << 0; - if (audio.status_bits & AUDIO_STATUS_NONAUDIO) - iec |= 1 << 1; - if (audio.status_bits & AUDIO_STATUS_COPYRIGHT) - iec |= 1 << 2; - if (audio.status_bits & AUDIO_STATUS_EMPHASIS) - iec |= 1 << 3; - - iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code); - - switch (audio.rate) { - case 32000: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3); - break; - case 44100: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0); - break; - case 48000: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2); - break; - case 88200: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8); - break; - case 96000: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa); - break; - case 176400: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc); - break; - case 192000: - iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe); - break; - } - - WREG32(HDMI0_60958_0 + offset, iec); - - iec = 0; - switch (audio.bits_per_sample) { - case 16: - iec |= HDMI0_60958_CS_WORD_LENGTH(0x2); - break; - case 20: - iec |= HDMI0_60958_CS_WORD_LENGTH(0x3); - break; - case 24: - iec |= HDMI0_60958_CS_WORD_LENGTH(0xb); - break; - } - if (audio.status_bits & AUDIO_STATUS_V) - iec |= 0x5 << 16; - WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f); - err = hdmi_audio_infoframe_init(&frame); if (err < 0) { DRM_ERROR("failed to setup audio infoframe\n"); @@ -627,8 +490,22 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder) return; } + value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset); + if (value & HDMI0_AUDIO_TEST_EN) + WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, + value & ~HDMI0_AUDIO_TEST_EN); + + WREG32_OR(HDMI0_CONTROL + offset, + HDMI0_ERROR_ACK); + + WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset, + ~HDMI0_AUDIO_INFO_SOURCE); + r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer)); - r600_hdmi_audio_workaround(encoder); + + WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, + HDMI0_AUDIO_INFO_CONT | + HDMI0_AUDIO_INFO_UPDATE); } /* @@ -651,11 +528,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; - if (enable) - dig->afmt->pin = r600_audio_get_pin(rdev); - else - dig->afmt->pin = NULL; - /* Older chipsets require setting HDMI and routing manually */ if (!ASIC_IS_DCE3(rdev)) { if (enable) diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 37455f65107..f94e7a9afe7 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1029,15 +1029,18 @@ #define HDMI0_AUDIO_PACKET_CONTROL 0x7408 # define HDMI0_AUDIO_SAMPLE_SEND (1 << 0) # define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4) +# define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4) # define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8) # define HDMI0_AUDIO_TEST_EN (1 << 12) # define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16) +# define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16) # define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24) # define 
HDMI0_60958_CS_UPDATE (1 << 26) # define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28) # define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29) #define HDMI0_AUDIO_CRC_CONTROL 0x740c # define HDMI0_AUDIO_CRC_EN (1 << 0) +#define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c #define HDMI0_VBI_PACKET_CONTROL 0x7410 # define HDMI0_NULL_SEND (1 << 0) # define HDMI0_GC_SEND (1 << 4) @@ -1054,7 +1057,9 @@ # define HDMI0_MPEG_INFO_UPDATE (1 << 10) #define HDMI0_INFOFRAME_CONTROL1 0x7418 # define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0) +# define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0) # define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8) +# define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8) # define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16) #define HDMI0_GENERIC_PACKET_CONTROL 0x741c # define HDMI0_GENERIC0_SEND (1 << 0) @@ -1063,7 +1068,9 @@ # define HDMI0_GENERIC1_SEND (1 << 4) # define HDMI0_GENERIC1_CONT (1 << 5) # define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16) +# define HDMI0_GENERIC0_LINE_MASK (0x3f << 16) # define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24) +# define HDMI0_GENERIC1_LINE_MASK (0x3f << 24) #define HDMI0_GC 0x7428 # define HDMI0_GC_AVMUTE (1 << 0) #define HDMI0_AVI_INFO0 0x7454 @@ -1119,16 +1126,22 @@ #define HDMI0_GENERIC1_6 0x74a8 #define HDMI0_ACR_32_0 0x74ac # define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12) +# define HDMI0_ACR_CTS_32_MASK (0xfffff << 12) #define HDMI0_ACR_32_1 0x74b0 # define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0) +# define HDMI0_ACR_N_32_MASK (0xfffff << 0) #define HDMI0_ACR_44_0 0x74b4 # define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12) +# define HDMI0_ACR_CTS_44_MASK (0xfffff << 12) #define HDMI0_ACR_44_1 0x74b8 # define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0) +# define HDMI0_ACR_N_44_MASK (0xfffff << 0) #define HDMI0_ACR_48_0 0x74bc # define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12) +# define HDMI0_ACR_CTS_48_MASK (0xfffff << 12) #define HDMI0_ACR_48_1 0x74c0 # define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0) +# define HDMI0_ACR_N_48_MASK (0xfffff << 0) #define HDMI0_ACR_STATUS_0 0x74c4 #define HDMI0_ACR_STATUS_1 0x74c8 #define HDMI0_AUDIO_INFO0 0x74cc @@ -1148,14 +1161,17 @@ # define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8) # define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16) # define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20) +# define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20) # define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24) # define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28) +# define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28) #define HDMI0_60958_1 0x74d8 # define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0) # define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4) # define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16) # define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18) # define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20) +# define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20) #define HDMI0_ACR_PACKET_CONTROL 0x74dc # define HDMI0_ACR_SEND (1 << 0) # define HDMI0_ACR_CONT (1 << 1) @@ -1166,6 +1182,7 @@ # define HDMI0_ACR_48 3 # define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */ # define HDMI0_ACR_AUTO_SEND (1 << 12) +#define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc #define HDMI0_RAMP_CONTROL0 0x74e0 # define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0) #define HDMI0_RAMP_CONTROL1 0x74e4 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4a8ac1cd6b4..60c47f82912 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ 
b/drivers/gpu/drm/radeon/radeon.h @@ -100,6 +100,9 @@ extern int radeon_dpm; extern int radeon_aspm; extern int radeon_runtime_pm; extern int radeon_hard_reset; +extern int radeon_vm_size; +extern int radeon_vm_block_size; +extern int radeon_deep_color; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -113,19 +116,16 @@ extern int radeon_hard_reset; #define RADEONFB_CONN_LIMIT 4 #define RADEON_BIOS_NUM_SCRATCH 8 -/* max number of rings */ -#define RADEON_NUM_RINGS 6 - /* fence seq are set to this number when signaled */ #define RADEON_FENCE_SIGNALED_SEQ 0LL /* internal ring indices */ /* r1xx+ has gfx CP ring */ -#define RADEON_RING_TYPE_GFX_INDEX 0 +#define RADEON_RING_TYPE_GFX_INDEX 0 /* cayman has 2 compute CP rings */ -#define CAYMAN_RING_TYPE_CP1_INDEX 1 -#define CAYMAN_RING_TYPE_CP2_INDEX 2 +#define CAYMAN_RING_TYPE_CP1_INDEX 1 +#define CAYMAN_RING_TYPE_CP2_INDEX 2 /* R600+ has an async dma ring */ #define R600_RING_TYPE_DMA_INDEX 3 @@ -133,7 +133,17 @@ extern int radeon_hard_reset; #define CAYMAN_RING_TYPE_DMA1_INDEX 4 /* R600+ */ -#define R600_RING_TYPE_UVD_INDEX 5 +#define R600_RING_TYPE_UVD_INDEX 5 + +/* TN+ */ +#define TN_RING_TYPE_VCE1_INDEX 6 +#define TN_RING_TYPE_VCE2_INDEX 7 + +/* max number of rings */ +#define RADEON_NUM_RINGS 8 + +/* number of hw syncs before falling back on blocking */ +#define RADEON_NUM_SYNCS 4 /* hardcode those limit for now */ #define RADEON_VA_IB_OFFSET (1 << 20) @@ -353,9 +366,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); -int radeon_fence_wait_locked(struct radeon_fence *fence); -int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); -int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); +int radeon_fence_wait_next(struct radeon_device *rdev, int ring); +int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); int radeon_fence_wait_any(struct radeon_device *rdev, struct radeon_fence **fences, bool intr); @@ -437,6 +449,7 @@ struct radeon_bo_va { /* protected by vm mutex */ struct list_head vm_list; + struct list_head vm_status; /* constant after initialization */ struct radeon_vm *vm; @@ -447,6 +460,7 @@ struct radeon_bo { /* Protected by gem.mutex */ struct list_head list; /* Protected by tbo.reserved */ + u32 initial_domain; u32 placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; @@ -469,16 +483,6 @@ struct radeon_bo { }; #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) -struct radeon_bo_list { - struct ttm_validate_buffer tv; - struct radeon_bo *bo; - uint64_t gpu_offset; - bool written; - unsigned domain; - unsigned alt_domain; - u32 tiling_flags; -}; - int radeon_gem_debugfs_init(struct radeon_device *rdev); /* sub-allocation manager, it has to be protected by another lock. @@ -554,7 +558,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, /* * Semaphores. */ -/* everything here is constant */ struct radeon_semaphore { struct radeon_sa_bo *sa_bo; signed waiters; @@ -677,14 +680,15 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell); * IRQS. 
*/ -struct radeon_unpin_work { - struct work_struct work; - struct radeon_device *rdev; - int crtc_id; - struct radeon_fence *fence; +struct radeon_flip_work { + struct work_struct flip_work; + struct work_struct unpin_work; + struct radeon_device *rdev; + int crtc_id; + uint64_t base; struct drm_pending_vblank_event *event; - struct radeon_bo *old_rbo; - u64 new_crtc_base; + struct radeon_bo *old_rbo; + struct radeon_fence *fence; }; struct r500_irq_stat_regs { @@ -731,6 +735,12 @@ struct cik_irq_stat_regs { u32 disp_int_cont4; u32 disp_int_cont5; u32 disp_int_cont6; + u32 d1grph_int; + u32 d2grph_int; + u32 d3grph_int; + u32 d4grph_int; + u32 d5grph_int; + u32 d6grph_int; }; union radeon_irq_stat_regs { @@ -740,10 +750,6 @@ union radeon_irq_stat_regs { struct cik_irq_stat_regs cik; }; -#define RADEON_MAX_HPD_PINS 6 -#define RADEON_MAX_CRTCS 6 -#define RADEON_MAX_AFMT_BLOCKS 7 - struct radeon_irq { bool installed; spinlock_t lock; @@ -787,7 +793,6 @@ struct radeon_ib { struct radeon_ring { struct radeon_bo *ring_obj; volatile uint32_t *ring; - unsigned rptr; unsigned rptr_offs; unsigned rptr_save_reg; u64 next_rptr_gpu_addr; @@ -797,8 +802,8 @@ struct radeon_ring { unsigned ring_size; unsigned ring_free_dw; int count_dw; - unsigned long last_activity; - unsigned last_rptr; + atomic_t last_rptr; + atomic64_t last_activity; uint64_t gpu_addr; uint32_t align_mask; uint32_t ptr_mask; @@ -831,13 +836,8 @@ struct radeon_mec { /* maximum number of VMIDs */ #define RADEON_NUM_VM 16 -/* defines number of bits in page table versus page directory, - * a page is 4KB so we have 12 bits offset, 9 bits in the page - * table and the remaining 19 bits are in the page directory */ -#define RADEON_VM_BLOCK_SIZE 9 - /* number of entries in page table */ -#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) +#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size) /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define RADEON_VM_PTB_ALIGN_SIZE 32768 @@ -850,17 +850,36 @@ struct radeon_mec { #define R600_PTE_READABLE (1 << 5) #define R600_PTE_WRITEABLE (1 << 6) +/* PTE (Page Table Entry) fragment field for different page sizes */ +#define R600_PTE_FRAG_4KB (0 << 7) +#define R600_PTE_FRAG_64KB (4 << 7) +#define R600_PTE_FRAG_256KB (6 << 7) + +/* flags used for GART page table entries on R600+ */ +#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \ + | R600_PTE_READABLE | R600_PTE_WRITEABLE) + +struct radeon_vm_pt { + struct radeon_bo *bo; + uint64_t addr; +}; + struct radeon_vm { - struct list_head list; struct list_head va; unsigned id; + /* BOs freed, but not yet updated in the PT */ + struct list_head freed; + /* contains the page directory */ - struct radeon_sa_bo *page_directory; + struct radeon_bo *page_directory; uint64_t pd_gpu_addr; + unsigned max_pde_used; /* array of page tables, one for each page directory entry */ - struct radeon_sa_bo **page_tables; + struct radeon_vm_pt *page_tables; + + struct radeon_bo_va *ib_bo_va; struct mutex mutex; /* last fence for cs using this vm */ @@ -872,10 +891,7 @@ struct radeon_vm { }; struct radeon_vm_manager { - struct mutex lock; - struct list_head lru_vm; struct radeon_fence *active[RADEON_NUM_VM]; - struct radeon_sa_manager sa_manager; uint32_t max_pfn; /* number of VMIDs */ unsigned nvm; @@ -951,8 +967,8 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *c void radeon_ring_undo(struct radeon_ring *ring); void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); int 
radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); -void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring); -void radeon_ring_lockup_update(struct radeon_ring *ring); +void radeon_ring_lockup_update(struct radeon_device *rdev, + struct radeon_ring *ring); bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, uint32_t **data); @@ -978,9 +994,12 @@ void cayman_dma_fini(struct radeon_device *rdev); struct radeon_cs_reloc { struct drm_gem_object *gobj; struct radeon_bo *robj; - struct radeon_bo_list lobj; + struct ttm_validate_buffer tv; + uint64_t gpu_offset; + unsigned prefered_domains; + unsigned allowed_domains; + uint32_t tiling_flags; uint32_t handle; - uint32_t flags; }; struct radeon_cs_chunk { @@ -1004,6 +1023,7 @@ struct radeon_cs_parser { unsigned nrelocs; struct radeon_cs_reloc *relocs; struct radeon_cs_reloc **relocs_ptr; + struct radeon_cs_reloc *vm_bos; struct list_head validated; unsigned dma_reloc_idx; /* indices of various chunks */ @@ -1253,6 +1273,17 @@ enum radeon_dpm_event_src { RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 }; +#define RADEON_MAX_VCE_LEVELS 6 + +enum radeon_vce_level { + RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ + RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ + RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ + RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ + RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ + RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ +}; + struct radeon_ps { u32 caps; /* vbios flags */ u32 class; /* vbios flags */ @@ -1263,6 +1294,8 @@ struct radeon_ps { /* VCE clocks */ u32 evclk; u32 ecclk; + bool vce_active; + enum radeon_vce_level vce_level; /* asic priv */ void *ps_priv; }; @@ -1437,6 +1470,17 @@ enum radeon_dpm_forced_level { RADEON_DPM_FORCED_LEVEL_HIGH = 2, }; +struct radeon_vce_state { + /* vce clocks */ + u32 evclk; + u32 ecclk; + /* gpu clocks */ + u32 sclk; + u32 mclk; + u8 clk_idx; + u8 pstate; +}; + struct radeon_dpm { struct radeon_ps *ps; /* number of valid power states */ @@ -1449,6 +1493,9 @@ struct radeon_dpm { struct radeon_ps *boot_ps; /* default uvd power state */ struct radeon_ps *uvd_ps; + /* vce requirements */ + struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS]; + enum radeon_vce_level vce_level; enum radeon_pm_state_type state; enum radeon_pm_state_type user_state; u32 platform_caps; @@ -1474,6 +1521,7 @@ struct radeon_dpm { /* special states active */ bool thermal_active; bool uvd_active; + bool vce_active; /* thermal handling */ struct radeon_dpm_thermal thermal; /* forced levels */ @@ -1484,6 +1532,7 @@ struct radeon_dpm { }; void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable); +void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable); struct radeon_pm { struct mutex mutex; @@ -1589,6 +1638,46 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, unsigned cg_upll_func_cntl); +/* + * VCE + */ +#define RADEON_MAX_VCE_HANDLES 16 +#define RADEON_VCE_STACK_SIZE (1024*1024) +#define RADEON_VCE_HEAP_SIZE (4*1024*1024) + +struct radeon_vce { + struct radeon_bo *vcpu_bo; + uint64_t gpu_addr; + unsigned fw_version; + unsigned fb_version; + atomic_t handles[RADEON_MAX_VCE_HANDLES]; + struct drm_file 
*filp[RADEON_MAX_VCE_HANDLES]; + unsigned img_size[RADEON_MAX_VCE_HANDLES]; + struct delayed_work idle_work; +}; + +int radeon_vce_init(struct radeon_device *rdev); +void radeon_vce_fini(struct radeon_device *rdev); +int radeon_vce_suspend(struct radeon_device *rdev); +int radeon_vce_resume(struct radeon_device *rdev); +int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, + uint32_t handle, struct radeon_fence **fence); +int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, + uint32_t handle, struct radeon_fence **fence); +void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); +void radeon_vce_note_usage(struct radeon_device *rdev); +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); +int radeon_vce_cs_parse(struct radeon_cs_parser *p); +bool radeon_vce_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); +void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); +void radeon_vce_fence_emit(struct radeon_device *rdev, + struct radeon_fence *fence); +int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); +int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); + struct r600_audio_pin { int channels; int rate; @@ -1692,7 +1781,8 @@ struct radeon_asic { /* gart */ struct { void (*tlb_flush)(struct radeon_device *rdev); - int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); + void (*set_page)(struct radeon_device *rdev, unsigned i, + uint64_t addr); } gart; struct { int (*init)(struct radeon_device *rdev); @@ -1778,6 +1868,7 @@ struct radeon_asic { void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); void (*set_clock_gating)(struct radeon_device *rdev, int enable); int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk); + int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk); int (*get_temperature)(struct radeon_device *rdev); } pm; /* dynamic power management */ @@ -1803,9 +1894,8 @@ struct radeon_asic { } dpm; /* pageflipping */ struct { - void (*pre_page_flip)(struct radeon_device *rdev, int crtc); - u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); - void (*post_page_flip)(struct radeon_device *rdev, int crtc); + void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); + bool (*page_flip_pending)(struct radeon_device *rdev, int crtc); } pflip; }; @@ -1844,6 +1934,7 @@ struct r600_asic { unsigned tiling_group_size; unsigned tile_config; unsigned backend_map; + unsigned active_simds; }; struct rv770_asic { @@ -1869,6 +1960,7 @@ struct rv770_asic { unsigned tiling_group_size; unsigned tile_config; unsigned backend_map; + unsigned active_simds; }; struct evergreen_asic { @@ -1895,6 +1987,7 @@ struct evergreen_asic { unsigned tiling_group_size; unsigned tile_config; unsigned backend_map; + unsigned active_simds; }; struct cayman_asic { @@ -1933,6 +2026,7 @@ struct cayman_asic { unsigned multi_gpu_tile_size; unsigned tile_config; + unsigned active_simds; }; struct si_asic { @@ -1963,6 +2057,7 @@ struct si_asic { unsigned tile_config; uint32_t tile_mode_array[32]; + uint32_t active_cus; }; struct cik_asic { @@ -1994,6 +2089,7 @@ struct cik_asic { unsigned tile_config; uint32_t tile_mode_array[32]; uint32_t macrotile_mode_array[16]; + uint32_t active_cus; }; union radeon_asic_config { @@ -2039,6 +2135,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct 
drm_file *filp); int radeon_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_op_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -2184,6 +2282,7 @@ struct radeon_device { struct radeon_gem gem; struct radeon_pm pm; struct radeon_uvd uvd; + struct radeon_vce vce; uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; struct radeon_wb wb; struct radeon_dummy_page dummy_page; @@ -2203,6 +2302,7 @@ struct radeon_device { const struct firmware *sdma_fw; /* CIK SDMA firmware */ const struct firmware *smc_fw; /* SMC firmware */ const struct firmware *uvd_fw; /* UVD firmware */ + const struct firmware *vce_fw; /* VCE firmware */ struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ @@ -2227,6 +2327,10 @@ struct radeon_device { /* virtual memory */ struct radeon_vm_manager vm_manager; struct mutex gpu_clock_mutex; + /* memory stats */ + atomic64_t vram_usage; + atomic64_t gtt_usage; + atomic64_t num_bytes_moved; /* ACPI interface */ struct radeon_atif atif; struct radeon_atcs atcs; @@ -2240,6 +2344,7 @@ struct radeon_device { bool have_disp_power_ref; }; +bool radeon_is_px(struct drm_device *dev); int radeon_device_init(struct radeon_device *rdev, struct drm_device *ddev, struct pci_dev *pdev, @@ -2550,6 +2655,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) #define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) +#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) +#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ + (rdev->family == CHIP_MULLINS)) #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ (rdev->ddev->pdev->device == 0x6850) || \ @@ -2637,6 +2746,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) #define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d)) +#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec)) #define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev)) #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) @@ -2651,9 +2761,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) -#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) #define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) -#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) +#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc)) #define radeon_wait_for_vblank(rdev, 
crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) @@ -2713,19 +2822,26 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev, */ int radeon_vm_manager_init(struct radeon_device *rdev); void radeon_vm_manager_fini(struct radeon_device *rdev); -void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); +int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); -int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); -void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm); +struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, + struct radeon_vm *vm, + struct list_head *head); struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, struct radeon_vm *vm, int ring); +void radeon_vm_flush(struct radeon_device *rdev, + struct radeon_vm *vm, + int ring); void radeon_vm_fence(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_fence *fence); uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); +int radeon_vm_update_page_directory(struct radeon_device *rdev, + struct radeon_vm *vm); +int radeon_vm_clear_freed(struct radeon_device *rdev, + struct radeon_vm *vm); int radeon_vm_bo_update(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, + struct radeon_bo_va *bo_va, struct ttm_mem_reg *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); @@ -2738,13 +2854,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, struct radeon_bo_va *bo_va, uint64_t offset, uint32_t flags); -int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va); +void radeon_vm_bo_rmv(struct radeon_device *rdev, + struct radeon_bo_va *bo_va); /* audio */ void r600_audio_update_hdmi(struct work_struct *work); struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev); struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); +void r600_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable); +void dce6_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable); /* * R600 vram scratch functions diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 42433344cb1..a9297b2c352 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -117,9 +117,6 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */ { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x8175, 1}, - /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */ - { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47, - PCI_VENDOR_ID_ATI, 0x0152, 2}, { 0, 0, 0, 0, 0, 0, 0 }, }; #endif diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f74db43346f..34b9aa9e3c0 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -248,9 +248,8 @@ static struct radeon_asic r100_asic = { .set_clock_gating = &radeon_legacy_set_clock_gating, }, .pflip = { - .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, - .post_page_flip = &r100_post_page_flip, + .page_flip_pending = &r100_page_flip_pending, 
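The pflip conversion running through these asic tables replaces the old pre_page_flip()/post_page_flip() bracketing hooks with a single page_flip() programming call plus a page_flip_pending() predicate, so flip completion is now detected by polling the CRTC state instead of relying on fixed callbacks. A rough sketch of how a flip worker can consume the new hook through the radeon_page_flip_pending() wrapper defined in radeon.h above (the helper name and sleep interval are illustrative):

#include <linux/delay.h>

static void example_wait_flip_done(struct radeon_device *rdev, int crtc_id)
{
	/* poll until the CRTC has latched the new scanout base address */
	while (radeon_page_flip_pending(rdev, crtc_id))
		usleep_range(1000, 2000);
}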
}, }; @@ -315,9 +314,8 @@ static struct radeon_asic r200_asic = { .set_clock_gating = &radeon_legacy_set_clock_gating, }, .pflip = { - .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, - .post_page_flip = &r100_post_page_flip, + .page_flip_pending = &r100_page_flip_pending, }, }; @@ -396,9 +394,8 @@ static struct radeon_asic r300_asic = { .set_clock_gating = &radeon_legacy_set_clock_gating, }, .pflip = { - .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, - .post_page_flip = &r100_post_page_flip, + .page_flip_pending = &r100_page_flip_pending, }, }; @@ -463,9 +460,8 @@ static struct radeon_asic r300_asic_pcie = { .set_clock_gating = &radeon_legacy_set_clock_gating, }, .pflip = { - .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, - .post_page_flip = &r100_post_page_flip, + .page_flip_pending = &r100_page_flip_pending, }, }; @@ -530,9 +526,8 @@ static struct radeon_asic r420_asic = { .set_clock_gating = &radeon_atom_set_clock_gating, }, .pflip = { - .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, - .post_page_flip = &r100_post_page_flip, + .page_flip_pending = &r100_page_flip_pending, }, }; @@ -597,9 +592,8 @@ static struct radeon_asic rs400_asic = { .set_clock_gating = &radeon_legacy_set_clock_gating, }, .pflip = { - .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, - .post_page_flip = &r100_post_page_flip, + .page_flip_pending = &r100_page_flip_pending, }, }; @@ -666,9 +660,8 @@ static struct radeon_asic rs600_asic = { .set_clock_gating = &radeon_atom_set_clock_gating, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, }; @@ -735,9 +728,8 @@ static struct radeon_asic rs690_asic = { .set_clock_gating = &radeon_atom_set_clock_gating, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, }; @@ -802,9 +794,8 @@ static struct radeon_asic rv515_asic = { .set_clock_gating = &radeon_atom_set_clock_gating, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, }; @@ -869,9 +860,8 @@ static struct radeon_asic r520_asic = { .set_clock_gating = &radeon_atom_set_clock_gating, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, }; @@ -968,9 +958,8 @@ static struct radeon_asic r600_asic = { .get_temperature = &rv6xx_get_temp, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, }; @@ -1059,9 +1048,8 @@ static struct radeon_asic rv6xx_asic = { .force_performance_level = &rv6xx_dpm_force_performance_level, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, }; @@ -1150,9 +1138,8 @@ static struct radeon_asic rs780_asic = { .force_performance_level = &rs780_dpm_force_performance_level, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rs600_page_flip_pending, }, 
}; @@ -1201,7 +1188,7 @@ static struct radeon_asic rv770_asic = { .set_backlight_level = &atombios_set_backlight_level, .get_backlight_level = &atombios_get_backlight_level, .hdmi_enable = &r600_hdmi_enable, - .hdmi_setmode = &r600_hdmi_setmode, + .hdmi_setmode = &dce3_1_hdmi_setmode, }, .copy = { .blit = &r600_copy_cpdma, @@ -1256,9 +1243,8 @@ static struct radeon_asic rv770_asic = { .vblank_too_short = &rv770_dpm_vblank_too_short, }, .pflip = { - .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rv770_page_flip, - .post_page_flip = &rs600_post_page_flip, + .page_flip_pending = &rv770_page_flip_pending, }, }; @@ -1375,9 +1361,8 @@ static struct radeon_asic evergreen_asic = { .vblank_too_short = &cypress_dpm_vblank_too_short, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -1467,9 +1452,8 @@ static struct radeon_asic sumo_asic = { .force_performance_level = &sumo_dpm_force_performance_level, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -1555,14 +1539,13 @@ static struct radeon_asic btc_asic = { .get_sclk = &btc_dpm_get_sclk, .get_mclk = &btc_dpm_get_mclk, .print_power_state = &rv770_dpm_print_power_state, - .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, + .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, .force_performance_level = &rv770_dpm_force_performance_level, .vblank_too_short = &btc_dpm_vblank_too_short, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -1704,9 +1687,8 @@ static struct radeon_asic cayman_asic = { .vblank_too_short = &ni_dpm_vblank_too_short, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -1805,9 +1787,8 @@ static struct radeon_asic trinity_asic = { .enable_bapm = &trinity_dpm_enable_bapm, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -1936,9 +1917,8 @@ static struct radeon_asic si_asic = { .vblank_too_short = &ni_dpm_vblank_too_short, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -1987,6 +1967,19 @@ static struct radeon_asic_ring ci_dma_ring = { .set_wptr = &cik_sdma_set_wptr, }; +static struct radeon_asic_ring ci_vce_ring = { + .ib_execute = &radeon_vce_ib_execute, + .emit_fence = &radeon_vce_fence_emit, + .emit_semaphore = &radeon_vce_semaphore_emit, + .cs_parse = &radeon_vce_cs_parse, + .ring_test = &radeon_vce_ring_test, + .ib_test = &radeon_vce_ib_test, + .is_lockup = &radeon_ring_test_lockup, + .get_rptr = &vce_v1_0_get_rptr, + .get_wptr = &vce_v1_0_get_wptr, + .set_wptr = &vce_v1_0_set_wptr, +}; + static struct radeon_asic ci_asic = { .init = &cik_init, .fini = &cik_fini, @@ -2015,6 +2008,8 @@ static struct radeon_asic ci_asic = { 
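The ci_vce_ring table above is a per-ring vtable: everything the core needs to drive a VCE ring (IB submission, fence and semaphore emission, ring/IB tests, rptr/wptr access) is reached through these callbacks, and both new VCE ring indices are simply pointed at this one shared table in the ci/kv asic structs that follow. A hedged sketch of how core code dispatches through such a table (the accessor shape mirrors the asic ring array used in these structs; the helper itself is illustrative):

static int example_ring_ib_test(struct radeon_device *rdev, int idx)
{
	struct radeon_ring *ring = &rdev->ring[idx];

	/* e.g. for idx == TN_RING_TYPE_VCE1_INDEX this lands in radeon_vce_ib_test() */
	return rdev->asic->ring[idx]->ib_test(rdev, ring);
}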
[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, + [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring, + [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring, }, .irq = { .set = &cik_irq_set, @@ -2061,6 +2056,7 @@ static struct radeon_asic ci_asic = { .set_pcie_lanes = NULL, .set_clock_gating = NULL, .set_uvd_clocks = &cik_set_uvd_clocks, + .set_vce_clocks = &cik_set_vce_clocks, .get_temperature = &ci_get_temp, }, .dpm = { @@ -2083,9 +2079,8 @@ static struct radeon_asic ci_asic = { .powergate_uvd = &ci_dpm_powergate_uvd, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -2117,6 +2112,8 @@ static struct radeon_asic kv_asic = { [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring, [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring, [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, + [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring, + [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring, }, .irq = { .set = &cik_irq_set, @@ -2163,6 +2160,7 @@ static struct radeon_asic kv_asic = { .set_pcie_lanes = NULL, .set_clock_gating = NULL, .set_uvd_clocks = &cik_set_uvd_clocks, + .set_vce_clocks = &cik_set_vce_clocks, .get_temperature = &kv_get_temp, }, .dpm = { @@ -2185,9 +2183,8 @@ static struct radeon_asic kv_asic = { .enable_bapm = &kv_dpm_enable_bapm, }, .pflip = { - .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, - .post_page_flip = &evergreen_post_page_flip, + .page_flip_pending = &evergreen_page_flip_pending, }, }; @@ -2497,6 +2494,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: rdev->asic = &kv_asic; /* set num crtcs */ if (rdev->family == CHIP_KAVERI) { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index b3bc433eed4..01e7c0ad8f0 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -67,7 +67,8 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); -int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); @@ -135,9 +136,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev); extern void r100_pm_finish(struct radeon_device *rdev); extern void r100_pm_init_profile(struct radeon_device *rdev); extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); -extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); -extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); -extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); +extern void r100_page_flip(struct radeon_device *rdev, int crtc, + u64 crtc_base); +extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc); extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc); extern int r100_mc_wait_for_idle(struct radeon_device *rdev); @@ -171,7 +172,8 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence 
*fence); extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); -extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -206,7 +208,8 @@ extern void rs400_fini(struct radeon_device *rdev); extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); -int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int rs400_gart_init(struct radeon_device *rdev); @@ -229,7 +232,8 @@ int rs600_irq_process(struct radeon_device *rdev); void rs600_irq_disable(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); -int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); @@ -241,9 +245,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev, extern void rs600_pm_misc(struct radeon_device *rdev); extern void rs600_pm_prepare(struct radeon_device *rdev); extern void rs600_pm_finish(struct radeon_device *rdev); -extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); -extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); -extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); +extern void rs600_page_flip(struct radeon_device *rdev, int crtc, + u64 crtc_base); +extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc); void rs600_set_safe_registers(struct radeon_device *rdev); extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc); extern int rs600_mc_wait_for_idle(struct radeon_device *rdev); @@ -387,6 +391,11 @@ void r600_rlc_stop(struct radeon_device *rdev); int r600_audio_init(struct radeon_device *rdev); struct r600_audio_pin r600_audio_status(struct radeon_device *rdev); void r600_audio_fini(struct radeon_device *rdev); +void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); +void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, + size_t size); +void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock); +void r600_hdmi_audio_workaround(struct drm_encoder *encoder); int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); @@ -447,7 +456,8 @@ void rv770_fini(struct radeon_device *rdev); int rv770_suspend(struct radeon_device *rdev); int rv770_resume(struct radeon_device *rdev); void rv770_pm_misc(struct radeon_device *rdev); -u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +void 
rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc); void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); void r700_cp_stop(struct radeon_device *rdev); void r700_cp_fini(struct radeon_device *rdev); @@ -458,6 +468,8 @@ int rv770_copy_dma(struct radeon_device *rdev, u32 rv770_get_xclk(struct radeon_device *rdev); int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int rv770_get_temp(struct radeon_device *rdev); +/* hdmi */ +void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); /* rv7xx pm */ int rv770_dpm_init(struct radeon_device *rdev); int rv770_dpm_enable(struct radeon_device *rdev); @@ -513,9 +525,9 @@ extern void sumo_pm_init_profile(struct radeon_device *rdev); extern void btc_pm_init_profile(struct radeon_device *rdev); int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); -extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); -extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); -extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); +extern void evergreen_page_flip(struct radeon_device *rdev, int crtc, + u64 crtc_base); +extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc); extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc); void evergreen_disable_interrupt_state(struct radeon_device *rdev); int evergreen_mc_wait_for_idle(struct radeon_device *rdev); @@ -551,6 +563,8 @@ void btc_dpm_fini(struct radeon_device *rdev); u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); bool btc_dpm_vblank_too_short(struct radeon_device *rdev); +void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m); int sumo_dpm_init(struct radeon_device *rdev); int sumo_dpm_enable(struct radeon_device *rdev); int sumo_dpm_late_enable(struct radeon_device *rdev); @@ -715,6 +729,7 @@ u32 cik_get_xclk(struct radeon_device *rdev); uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg); void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); +int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk); void cik_sdma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, @@ -861,4 +876,17 @@ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, /* uvd v4.2 */ int uvd_v4_2_resume(struct radeon_device *rdev); +/* vce v1.0 */ +uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev, + struct radeon_ring *ring); +uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); +void vce_v1_0_set_wptr(struct radeon_device *rdev, + struct radeon_ring *ring); +int vce_v1_0_init(struct radeon_device *rdev); +int vce_v1_0_start(struct radeon_device *rdev); + +/* vce v2.0 */ +int vce_v2_0_resume(struct radeon_device *rdev); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 30844814c25..173f378428a 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device 
*dev) rdev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); if (rdev->clock.default_dispclk == 0) { - if (ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE6(rdev)) + rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + else if (ASIC_IS_DCE5(rdev)) rdev->clock.default_dispclk = 54000; /* 540 Mhz */ else rdev->clock.default_dispclk = 60000; /* 600 Mhz */ } + /* set a reasonable default for DP */ + if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + rdev->clock.default_dispclk / 100); + rdev->clock.default_dispclk = 60000; + } rdev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); rdev->clock.current_dispclk = rdev->clock.default_dispclk; diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 485848f889f..a9fb0d016d3 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -59,7 +59,7 @@ struct atpx_mux { u16 mux; } __packed; -bool radeon_is_px(void) { +bool radeon_has_atpx(void) { return radeon_atpx_priv.atpx_detected; } @@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) memcpy(&output, info->buffer.pointer, size); /* TODO: check version? */ - printk("ATPX version %u\n", output.version); + printk("ATPX version %u, functions 0x%08x\n", + output.version, output.function_bits); radeon_atpx_parse_functions(&atpx->functions, output.function_bits); @@ -527,6 +528,13 @@ static bool radeon_atpx_detect(void) has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); } + /* some newer PX laptops mark the dGPU as a non-VGA display device */ + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + vga_count++; + + has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); + } + if (has_atpx && vga_count == 2) { acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a531..6a03624fada 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } } + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + if (!found) return false; @@ -612,7 +626,7 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) vhdr->DeviceID != rdev->pdev->device) { DRM_INFO("ACPI VFCT table is not for this card\n"); goto out_unmap; - }; + } if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { DRM_ERROR("ACPI VFCT image truncated\n"); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 82d4f865546..44831197e82 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -48,6 +48,7 @@ void radeon_connector_hotplug(struct drm_connector *connector) radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); /* if the connector is already off, don't turn it back on */ + /* FIXME: This access isn't protected by any locks. 
*/ if (connector->dpms != DRM_MODE_DPMS_ON) return; @@ -89,7 +90,7 @@ static void radeon_property_change_mode(struct drm_encoder *encoder) if (crtc && crtc->enabled) { drm_crtc_helper_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + crtc->x, crtc->y, crtc->primary->fb); } } @@ -100,6 +101,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; int bpc = 8; + int mode_clock, max_tmds_clock; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: @@ -145,6 +147,64 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) } break; } + + if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + /* hdmi deep color only implemented on DCE4+ */ + if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) { + DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n", + connector->name, bpc); + bpc = 8; + } + + /* + * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make + * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at + * 12 bpc is always supported on hdmi deep color sinks, as this is + * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum. + */ + if (bpc > 12) { + DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n", + connector->name, bpc); + bpc = 12; + } + + /* Any defined maximum tmds clock limit we must not exceed? */ + if (connector->max_tmds_clock > 0) { + /* mode_clock is clock in kHz for mode to be modeset on this connector */ + mode_clock = radeon_connector->pixelclock_for_modeset; + + /* Maximum allowable input clock in kHz */ + max_tmds_clock = connector->max_tmds_clock * 1000; + + DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n", + connector->name, mode_clock, max_tmds_clock); + + /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */ + if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { + if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && + (mode_clock * 5/4 <= max_tmds_clock)) + bpc = 10; + else + bpc = 8; + + DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n", + connector->name, bpc); + } + + if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) { + bpc = 8; + DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. 
Using %d bpc.\n", + connector->name, bpc); + } + } + } + + if ((radeon_deep_color == 0) && (bpc > 8)) + bpc = 8; + + DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", + connector->name, connector->display_info.bpc, bpc); + return bpc; } @@ -260,13 +320,17 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, continue; if (priority == true) { - DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); - DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", + conflict->name); + DRM_DEBUG_KMS("in favor of %s\n", + connector->name); conflict->status = connector_status_disconnected; radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); } else { - DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); - DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); + DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", + connector->name); + DRM_DEBUG_KMS("in favor of %s\n", + conflict->name); current_status = connector_status_disconnected; } break; @@ -787,7 +851,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", - drm_get_connector_name(connector)); + connector->name); ret = connector_status_connected; } else { radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); @@ -1010,12 +1074,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", - drm_get_connector_name(connector)); + connector->name); /* rs690 seems to have a problem with connectors not existing and always * return a block of 0's. 
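The deep color clamping above scales the mode clock by bpc/8 before comparing it against the sink's TMDS limit: 12 bpc multiplies the dot clock by 3/2 and 10 bpc by 5/4. A minimal standalone sketch of that fallback order (illustrative only, not part of the patch; helper name and kHz units are assumptions):

/* Mirror the 12 -> 10 -> 8 bpc fallback above.
 * Effective TMDS clock = mode clock * bpc / 8. */
static int pick_hdmi_bpc(int mode_clock_khz, int max_tmds_khz, bool sink_dc30)
{
	int bpc = 12;

	if (mode_clock_khz * 3 / 2 > max_tmds_khz)	/* 12 bpc is x1.5 */
		bpc = (sink_dc30 && mode_clock_khz * 5 / 4 <= max_tmds_khz) ?
			10 : 8;
	return bpc;
}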
If we see this just stop polling on this output */ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { ret = connector_status_disconnected; - DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); + DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", + connector->name); radeon_connector->ddc_bus = NULL; } else { ret = connector_status_connected; @@ -1226,17 +1291,15 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector, (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) return MODE_OK; - else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) { - if (ASIC_IS_DCE6(rdev)) { - /* HDMI 1.3+ supports max clock of 340 Mhz */ - if (mode->clock > 340000) - return MODE_CLOCK_HIGH; - else - return MODE_OK; - } else + else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + /* HDMI 1.3+ supports max clock of 340 Mhz */ + if (mode->clock > 340000) return MODE_CLOCK_HIGH; - } else + else + return MODE_OK; + } else { return MODE_CLOCK_HIGH; + } } /* check against the max pixel clock */ @@ -1261,21 +1324,6 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = { .force = radeon_dvi_force, }; -static void radeon_dp_connector_destroy(struct drm_connector *connector) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; - - if (radeon_connector->edid) - kfree(radeon_connector->edid); - if (radeon_dig_connector->dp_i2c_bus) - radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); - kfree(radeon_connector->con_priv); - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); - kfree(connector); -} - static int radeon_dp_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -1402,7 +1450,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector) struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE5(rdev) && - (rdev->clock.dp_extclk >= 53900) && + (rdev->clock.default_dispclk >= 53900) && radeon_connector_encoder_is_hbr2(connector)) { return true; } @@ -1502,6 +1550,8 @@ out: static int radeon_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; @@ -1532,14 +1582,23 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, return MODE_PANEL; } } - return MODE_OK; } else { if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || - (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { return radeon_dp_mode_valid_helper(connector, mode); - else - return MODE_OK; + } else { + if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + /* HDMI 1.3+ supports max clock of 340 Mhz */ + if (mode->clock > 340000) + return MODE_CLOCK_HIGH; + } else { + if (mode->clock > 165000) + return MODE_CLOCK_HIGH; + } + } } + + return MODE_OK; } static const struct 
drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { @@ -1553,7 +1612,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, - .destroy = radeon_dp_connector_destroy, + .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1562,7 +1621,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, - .destroy = radeon_dp_connector_destroy, + .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1571,7 +1630,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, - .destroy = radeon_dp_connector_destroy, + .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; @@ -1595,6 +1654,7 @@ radeon_add_atom_connector(struct drm_device *dev, uint32_t subpixel_order = SubPixelNone; bool shared_ddc = false; bool is_dp_bridge = false; + bool has_aux = false; if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; @@ -1667,15 +1727,10 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; if (i2c_bus->valid) { - /* add DP i2c bus */ - if (connector_type == DRM_MODE_CONNECTOR_eDP) - radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); - else - radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); - if (!radeon_dig_connector->dp_i2c_bus) - DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); - if (!radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) + has_aux = true; + else DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } switch (connector_type) { @@ -1890,12 +1945,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { - /* add DP i2c bus */ - radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); - if (!radeon_dig_connector->dp_i2c_bus) - DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); - if (!radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) + has_aux = true; + else DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; @@ -1937,12 +1990,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { - /* add DP i2c bus */ - radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); - if (!radeon_dig_connector->dp_i2c_bus) - DRM_ERROR("DP: Failed to assign dp ddc bus! 
Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); - if (!radeon_connector->ddc_bus) + if (radeon_connector->ddc_bus) + has_aux = true; + else DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } drm_object_attach_property(&radeon_connector->base.base, @@ -2000,6 +2051,10 @@ radeon_add_atom_connector(struct drm_device *dev, connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); + + if (has_aux) + radeon_dp_aux_init(radeon_connector); + return; failed: diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index dfb5a1db87d..ae763f60c8a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -24,16 +24,59 @@ * Authors: * Jerome Glisse <glisse@freedesktop.org> */ +#include <linux/list_sort.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> #include "radeon_reg.h" #include "radeon.h" #include "radeon_trace.h" +#define RADEON_CS_MAX_PRIORITY 32u +#define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1) + +/* This is based on the bucket sort with O(n) time complexity. + * An item with priority "i" is added to bucket[i]. The lists are then + * concatenated in descending order. + */ +struct radeon_cs_buckets { + struct list_head bucket[RADEON_CS_NUM_BUCKETS]; +}; + +static void radeon_cs_buckets_init(struct radeon_cs_buckets *b) +{ + unsigned i; + + for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) + INIT_LIST_HEAD(&b->bucket[i]); +} + +static void radeon_cs_buckets_add(struct radeon_cs_buckets *b, + struct list_head *item, unsigned priority) +{ + /* Since buffers which appear sooner in the relocation list are + * likely to be used more often than buffers which appear later + * in the list, the sort mustn't change the ordering of buffers + * with the same priority, i.e. it must be stable. + */ + list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]); +} + +static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b, + struct list_head *out_list) +{ + unsigned i; + + /* Connect the sorted buckets in the output list. */ + for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) { + list_splice(&b->bucket[i], out_list); + } +} + static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) { struct drm_device *ddev = p->rdev->ddev; struct radeon_cs_chunk *chunk; + struct radeon_cs_buckets buckets; unsigned i, j; bool duplicate; @@ -52,8 +95,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) if (p->relocs == NULL) { return -ENOMEM; } + + radeon_cs_buckets_init(&buckets); + for (i = 0; i < p->nrelocs; i++) { struct drm_radeon_cs_reloc *r; + unsigned priority; duplicate = false; r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; @@ -78,8 +125,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) } p->relocs_ptr[i] = &p->relocs[i]; p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); - p->relocs[i].lobj.bo = p->relocs[i].robj; - p->relocs[i].lobj.written = !!r->write_domain; + + /* The userspace buffer priorities are from 0 to 15. A higher + * number means the buffer is more important. + * Also, the buffers used for write have a higher priority than + * the buffers used for read only, which doubles the range + * to 0 to 31. 32 is reserved for the kernel driver. 
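The encoding described in this comment packs the userspace priority and the write flag into a single bucket index; a small standalone illustration (hypothetical helper, not in the patch):

#include <stdint.h>

/* Userspace passes 0-15 in the low four reloc flags; doubling it and
 * adding the write bit yields 0-31, leaving 32 (RADEON_CS_MAX_PRIORITY)
 * for kernel-internal relocations such as the UVD message buffer. */
static unsigned reloc_priority(uint32_t flags, uint32_t write_domain)
{
	return (flags & 0xf) * 2 + (write_domain != 0);
}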
+ */ + priority = (r->flags & 0xf) * 2 + !!r->write_domain; /* the first reloc of an UVD job is the msg and that must be in VRAM, also put everything into VRAM on AGP cards to avoid @@ -87,29 +140,44 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) if (p->ring == R600_RING_TYPE_UVD_INDEX && (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { /* TODO: is this still needed for NI+ ? */ - p->relocs[i].lobj.domain = + p->relocs[i].prefered_domains = RADEON_GEM_DOMAIN_VRAM; - p->relocs[i].lobj.alt_domain = + p->relocs[i].allowed_domains = RADEON_GEM_DOMAIN_VRAM; + /* prioritize this over any other relocation */ + priority = RADEON_CS_MAX_PRIORITY; } else { uint32_t domain = r->write_domain ? r->write_domain : r->read_domains; - p->relocs[i].lobj.domain = domain; + if (domain & RADEON_GEM_DOMAIN_CPU) { + DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " + "for command submission\n"); + return -EINVAL; + } + + p->relocs[i].prefered_domains = domain; if (domain == RADEON_GEM_DOMAIN_VRAM) domain |= RADEON_GEM_DOMAIN_GTT; - p->relocs[i].lobj.alt_domain = domain; + p->relocs[i].allowed_domains = domain; } - p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; + p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].handle = r->handle; - radeon_bo_list_add_object(&p->relocs[i].lobj, - &p->validated); + radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, + priority); } - return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring); + + radeon_cs_buckets_get_list(&buckets, &p->validated); + + if (p->cs_flags & RADEON_CS_USE_VM) + p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, + &p->validated); + + return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); } static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) @@ -147,6 +215,10 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority case RADEON_CS_RING_UVD: p->ring = R600_RING_TYPE_UVD_INDEX; break; + case RADEON_CS_RING_VCE: + /* TODO: only use the low priority ring for now */ + p->ring = TN_RING_TYPE_VCE1_INDEX; + break; } return 0; } @@ -276,16 +348,33 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return -EINVAL; /* we only support VM on some SI+ rings */ - if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { - DRM_ERROR("Ring %d requires VM!\n", p->ring); - return -EINVAL; + if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { + if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { + DRM_ERROR("Ring %d requires VM!\n", p->ring); + return -EINVAL; + } + } else { + if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { + DRM_ERROR("VM not supported on ring %d!\n", + p->ring); + return -EINVAL; + } } } return 0; } +static int cmp_size_smaller_first(void *priv, struct list_head *a, + struct list_head *b) +{ + struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); + struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); + + /* Sort A before B if A is smaller. */ + return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; +} + /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -299,6 +388,18 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo unsigned i; if (!error) { + /* Sort the buffer list from the smallest to largest buffer, + * which affects the order of buffers in the LRU list.
+ * This assures that the smallest buffers are added first + * to the LRU list, so they are likely to be later evicted + * first, instead of large buffers whose eviction is more + * expensive. + * + * This slightly lowers the number of bytes moved by TTM + * per frame under memory pressure. + */ + list_sort(NULL, &parser->validated, cmp_size_smaller_first); + ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, parser->ib.fence); @@ -316,6 +417,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo kfree(parser->track); kfree(parser->relocs); kfree(parser->relocs_ptr); + kfree(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); @@ -343,6 +445,9 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, if (parser->ring == R600_RING_TYPE_UVD_INDEX) radeon_uvd_note_usage(rdev); + else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) || + (parser->ring == TN_RING_TYPE_VCE2_INDEX)) + radeon_vce_note_usage(rdev); radeon_cs_sync_rings(parser); r = radeon_ib_schedule(rdev, &parser->ib, NULL); @@ -352,24 +457,48 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, return r; } -static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, +static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, struct radeon_vm *vm) { - struct radeon_device *rdev = parser->rdev; - struct radeon_bo_list *lobj; - struct radeon_bo *bo; - int r; + struct radeon_device *rdev = p->rdev; + struct radeon_bo_va *bo_va; + int i, r; - r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); - if (r) { + r = radeon_vm_update_page_directory(rdev, vm); + if (r) return r; + + r = radeon_vm_clear_freed(rdev, vm); + if (r) + return r; + + if (vm->ib_bo_va == NULL) { + DRM_ERROR("Tmp BO not in VM!\n"); + return -EINVAL; } - list_for_each_entry(lobj, &parser->validated, tv.head) { - bo = lobj->bo; - r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem); - if (r) { - return r; + + r = radeon_vm_bo_update(rdev, vm->ib_bo_va, + &rdev->ring_tmp_bo.bo->tbo.mem); + if (r) + return r; + + for (i = 0; i < p->nrelocs; i++) { + struct radeon_bo *bo; + + /* ignore duplicates */ + if (p->relocs_ptr[i] != &p->relocs[i]) + continue; + + bo = p->relocs[i].robj; + bo_va = radeon_vm_bo_find(vm, bo); + if (bo_va == NULL) { + dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); + return -EINVAL; } + + r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); + if (r) + return r; } return 0; } @@ -401,20 +530,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (parser->ring == R600_RING_TYPE_UVD_INDEX) radeon_uvd_note_usage(rdev); - mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); - r = radeon_vm_alloc_pt(rdev, vm); - if (r) { - goto out; - } r = radeon_bo_vm_update_pte(parser, vm); if (r) { goto out; } radeon_cs_sync_rings(parser); radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); - radeon_semaphore_sync_to(parser->ib.semaphore, - radeon_vm_grab_id(rdev, vm, parser->ring)); if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { @@ -423,14 +545,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, r = radeon_ib_schedule(rdev, &parser->ib, NULL); } - if (!r) { - radeon_vm_fence(rdev, vm, parser->ib.fence); - } - out: - radeon_vm_add_to_lru(rdev, vm); mutex_unlock(&vm->mutex); - mutex_unlock(&rdev->vm_manager.lock); return r; } @@ -698,9 +814,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, /* FIXME: we assume 
reloc size is 4 dwords */ if (nomm) { *cs_reloc = p->relocs; - (*cs_reloc)->lobj.gpu_offset = + (*cs_reloc)->gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; - (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; + (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; } else *cs_reloc = p->relocs_ptr[(idx / 4)]; return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b012cbbc3ed..697add2cd4e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -99,14 +99,18 @@ static const char radeon_family_name[][16] = { "KAVERI", "KABINI", "HAWAII", + "MULLINS", "LAST", }; -#if defined(CONFIG_VGA_SWITCHEROO) -bool radeon_is_px(void); -#else -static inline bool radeon_is_px(void) { return false; } -#endif +bool radeon_is_px(struct drm_device *dev) +{ + struct radeon_device *rdev = dev->dev_private; + + if (rdev->flags & RADEON_IS_PX) + return true; + return false; +} /** * radeon_program_register_sequence - program an array of registers. @@ -1048,6 +1052,43 @@ static void radeon_check_arguments(struct radeon_device *rdev) radeon_agpmode = 0; break; } + + if (!radeon_check_pot_argument(radeon_vm_size)) { + dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", + radeon_vm_size); + radeon_vm_size = 4; + } + + if (radeon_vm_size < 1) { + dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n", + radeon_vm_size); + radeon_vm_size = 4; + } + + /* + * Max GPUVM size for Cayman, SI and CI is 40 bits. + */ + if (radeon_vm_size > 1024) { + dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n", + radeon_vm_size); + radeon_vm_size = 4; + } + + /* defines number of bits in page table versus page directory, + * a page is 4KB so we have 12 bits offset, minimum 9 bits in the + * page table and the remaining bits are in the page directory */ + if (radeon_vm_block_size < 9) { + dev_warn(rdev->dev, "VM page table size (%d) too small\n", + radeon_vm_block_size); + radeon_vm_block_size = 9; + } + + if (radeon_vm_block_size > 24 || + (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) { + dev_warn(rdev->dev, "VM page table size (%d) too large\n", + radeon_vm_block_size); + radeon_vm_block_size = 9; + } } /** @@ -1082,7 +1123,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero { struct drm_device *dev = pci_get_drvdata(pdev); - if (radeon_is_px() && state == VGA_SWITCHEROO_OFF) + if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { @@ -1122,12 +1163,13 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - bool can_switch; - spin_lock(&dev->count_lock); - can_switch = (dev->open_count == 0); - spin_unlock(&dev->count_lock); - return can_switch; + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead to + * locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. + */ + return dev->open_count == 0; } static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { @@ -1191,20 +1233,17 @@ int radeon_device_init(struct radeon_device *rdev, r = radeon_gem_init(rdev); if (r) return r; - /* initialize vm here */ - mutex_init(&rdev->vm_manager.lock); + + radeon_check_arguments(rdev); /* Adjust VM size here. - * Currently set to 4GB ((1 << 20) 4k pages).
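The argument checks above treat radeon_vm_size as a gigabyte count; with 4 KiB pages that converts to a page-frame count as sketched here (illustration only, matching the max_pfn assignment just below):

#include <stdint.h>

/* vm_size GiB => (vm_size << 30) bytes => (vm_size << 18) 4 KiB pages,
 * which is what rdev->vm_manager.max_pfn = radeon_vm_size << 18 encodes. */
static uint64_t vm_max_pfn(uint64_t vm_size_gb)
{
	return vm_size_gb << (30 - 12);
}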
- * Max GPUVM size for cayman and SI is 40 bits. + * Max GPUVM size for cayman+ is 40 bits. */ - rdev->vm_manager.max_pfn = 1 << 20; - INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); + rdev->vm_manager.max_pfn = radeon_vm_size << 18; /* Set asic functions */ r = radeon_asic_init(rdev); if (r) return r; - radeon_check_arguments(rdev); /* all of the newer IGP chips have an internal gart * However some rs4xx report as AGP, so remove that here. @@ -1303,9 +1342,7 @@ int radeon_device_init(struct radeon_device *rdev, * ignore it */ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); - if (radeon_runtime_pm == 1) - runtime = true; - if ((radeon_runtime_pm == -1) && radeon_is_px()) + if (rdev->flags & RADEON_IS_PX) runtime = true; vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); if (runtime) @@ -1426,7 +1463,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); + struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); struct radeon_bo *robj; if (rfb == NULL || rfb->obj == NULL) { @@ -1445,10 +1482,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) /* evict vram memory */ radeon_bo_evict_vram(rdev); - mutex_lock(&rdev->ring_lock); /* wait for gpu to finish processing current batch */ for (i = 0; i < RADEON_NUM_RINGS; i++) { - r = radeon_fence_wait_empty_locked(rdev, i); + r = radeon_fence_wait_empty(rdev, i); if (r) { /* delay GPU reset to resume */ force_completion = true; @@ -1457,7 +1493,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) if (force_completion) { radeon_fence_driver_force_completion(rdev); } - mutex_unlock(&rdev->ring_lock); radeon_save_bios_scratch_regs(rdev); @@ -1521,22 +1556,20 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (r) DRM_ERROR("ib ring test failed (%d).\n", r); - if (rdev->pm.dpm_enabled) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { /* do dpm late init */ r = radeon_pm_late_init(rdev); if (r) { rdev->pm.dpm_enabled = false; DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); } + } else { + /* resume old pm late */ + radeon_pm_resume(rdev); } radeon_restore_bios_scratch_regs(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); - console_unlock(); - } - /* init dig PHYs, disp eng pll */ if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); @@ -1552,13 +1585,25 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) /* reset hpd state */ radeon_hpd_init(rdev); /* blat the mode back in */ - drm_helper_resume_force_mode(dev); - /* turn on display hw */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + if (fbcon) { + drm_helper_resume_force_mode(dev); + /* turn on display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } } drm_kms_helper_poll_enable(dev); + + /* set the power state here in case we are a PX system or headless */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) + radeon_pm_compute_clocks(rdev); + + if (fbcon) { + radeon_fbdev_set_suspend(rdev, 0); + console_unlock(); + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c 
index d680608f6f5..bf25061c8ac 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -34,6 +34,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +#include <linux/gcd.h> + static void avivo_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -64,7 +66,8 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) (radeon_crtc->lut_b[i] << 0)); } - WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); + /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */ + WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1); } static void dce4_crtc_load_lut(struct drm_crtc *crtc) @@ -247,16 +250,21 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); drm_crtc_cleanup(crtc); + destroy_workqueue(radeon_crtc->flip_queue); kfree(radeon_crtc); } -/* - * Handle unpin events outside the interrupt handler proper. +/** + * radeon_unpin_work_func - unpin old buffer object + * + * @__work - kernel work item + * + * Unpin the old frame buffer object outside of the interrupt handler */ static void radeon_unpin_work_func(struct work_struct *__work) { - struct radeon_unpin_work *work = - container_of(__work, struct radeon_unpin_work, work); + struct radeon_flip_work *work = + container_of(__work, struct radeon_flip_work, unpin_work); int r; /* unpin of the old buffer */ @@ -274,33 +282,28 @@ static void radeon_unpin_work_func(struct work_struct *__work) kfree(work); } -void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) +void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; - struct radeon_unpin_work *work; unsigned long flags; u32 update_pending; int vpos, hpos; + /* can happen during initialization */ + if (radeon_crtc == NULL) + return; + spin_lock_irqsave(&rdev->ddev->event_lock, flags); - work = radeon_crtc->unpin_work; - if (work == NULL || - (work->fence && !radeon_fence_signaled(work->fence))) { + if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " + "RADEON_FLIP_SUBMITTED(%d)\n", + radeon_crtc->flip_status, + RADEON_FLIP_SUBMITTED); spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } - /* New pageflip, or just completion of a previous one? */ - if (!radeon_crtc->deferred_flip_completion) { - /* do the flip (mmio) */ - update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); - } else { - /* This is just a completion of a flip queued in crtc - * at last invocation. Make sure we go directly to - * completion routine. - */ - update_pending = 0; - radeon_crtc->deferred_flip_completion = 0; - } + + update_pending = radeon_page_flip_pending(rdev, crtc_id); /* Has the pageflip already completed in crtc, or is it certain * to complete in this vblank? @@ -318,19 +321,43 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) */ update_pending = 0; } - if (update_pending) { - /* crtc didn't flip in this target vblank interval, - * but flip is pending in crtc. It will complete it - * in next vblank interval, so complete the flip at - * next vblank irq. 
- */ - radeon_crtc->deferred_flip_completion = 1; + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + if (!update_pending) + radeon_crtc_handle_flip(rdev, crtc_id); +} + +/** + * radeon_crtc_handle_flip - page flip completed + * + * @rdev: radeon device pointer + * @crtc_id: crtc number this event is for + * + * Called when we are sure that a page flip for this crtc is completed. + */ +void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + struct radeon_flip_work *work; + unsigned long flags; + + /* this can happen at init */ + if (radeon_crtc == NULL) + return; + + spin_lock_irqsave(&rdev->ddev->event_lock, flags); + work = radeon_crtc->flip_work; + if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " + "RADEON_FLIP_SUBMITTED(%d)\n", + radeon_crtc->flip_status, + RADEON_FLIP_SUBMITTED); spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); return; } - /* Pageflip (will be) certainly completed in this vblank. Clean up. */ - radeon_crtc->unpin_work = NULL; + /* Pageflip completed. Clean up. */ + radeon_crtc->flip_status = RADEON_FLIP_NONE; + radeon_crtc->flip_work = NULL; /* wakeup userspace */ if (work->event) @@ -339,9 +366,59 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); - radeon_fence_unref(&work->fence); - radeon_post_page_flip(work->rdev, work->crtc_id); - schedule_work(&work->work); + radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); + queue_work(radeon_crtc->flip_queue, &work->unpin_work); +} + +/** + * radeon_flip_work_func - page flip framebuffer + * + * @work - kernel work item + * + * Wait for the buffer object to become idle and do the actual page flip + */ +static void radeon_flip_work_func(struct work_struct *__work) +{ + struct radeon_flip_work *work = + container_of(__work, struct radeon_flip_work, flip_work); + struct radeon_device *rdev = work->rdev; + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; + + struct drm_crtc *crtc = &radeon_crtc->base; + unsigned long flags; + int r; + + down_read(&rdev->exclusive_lock); + if (work->fence) { + r = radeon_fence_wait(work->fence, false); + if (r == -EDEADLK) { + up_read(&rdev->exclusive_lock); + r = radeon_gpu_reset(rdev); + down_read(&rdev->exclusive_lock); + } + if (r) + DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); + + /* We continue with the page flip even if we failed to wait on + * the fence, otherwise the DRM core and userspace will be + * confused about which BO the CRTC is scanning out + */ + + radeon_fence_unref(&work->fence); + } + + /* We borrow the event spin lock for protecting flip_status */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + /* set the proper interrupt */ + radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + + /* do the flip (mmio) */ + radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); + + radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + up_read(&rdev->exclusive_lock); } static int radeon_crtc_page_flip(struct drm_crtc *crtc, @@ -355,69 +432,61 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, struct radeon_framebuffer *old_radeon_fb; struct radeon_framebuffer *new_radeon_fb; struct drm_gem_object *obj; - struct radeon_bo *rbo; - struct radeon_unpin_work *work; + struct radeon_flip_work 
*work; + struct radeon_bo *new_rbo; + uint32_t tiling_flags, pitch_pixels; + uint64_t base; unsigned long flags; - u32 tiling_flags, pitch_pixels; - u64 base; int r; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) return -ENOMEM; - work->event = event; + INIT_WORK(&work->flip_work, radeon_flip_work_func); + INIT_WORK(&work->unpin_work, radeon_unpin_work_func); + work->rdev = rdev; work->crtc_id = radeon_crtc->crtc_id; - old_radeon_fb = to_radeon_framebuffer(crtc->fb); - new_radeon_fb = to_radeon_framebuffer(fb); + work->event = event; + /* schedule unpin of the old buffer */ + old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); obj = old_radeon_fb->obj; + /* take a reference to the old object */ drm_gem_object_reference(obj); - rbo = gem_to_radeon_bo(obj); - work->old_rbo = rbo; - obj = new_radeon_fb->obj; - rbo = gem_to_radeon_bo(obj); + work->old_rbo = gem_to_radeon_bo(obj); - spin_lock(&rbo->tbo.bdev->fence_lock); - if (rbo->tbo.sync_obj) - work->fence = radeon_fence_ref(rbo->tbo.sync_obj); - spin_unlock(&rbo->tbo.bdev->fence_lock); + new_radeon_fb = to_radeon_framebuffer(fb); + obj = new_radeon_fb->obj; + new_rbo = gem_to_radeon_bo(obj); - INIT_WORK(&work->work, radeon_unpin_work_func); - - /* We borrow the event spin lock for protecting unpin_work */ - spin_lock_irqsave(&dev->event_lock, flags); - if (radeon_crtc->unpin_work) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - r = -EBUSY; - goto unlock_free; - } - radeon_crtc->unpin_work = work; - radeon_crtc->deferred_flip_completion = 0; - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_lock(&new_rbo->tbo.bdev->fence_lock); + if (new_rbo->tbo.sync_obj) + work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); + spin_unlock(&new_rbo->tbo.bdev->fence_lock); /* pin the new buffer */ - DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", - work->old_rbo, rbo); + DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n", + work->old_rbo, new_rbo); - r = radeon_bo_reserve(rbo, false); + r = radeon_bo_reserve(new_rbo, false); if (unlikely(r != 0)) { DRM_ERROR("failed to reserve new rbo buffer before flip\n"); - goto pflip_cleanup; + goto cleanup; } /* Only 27 bit offset for legacy CRTC */ - r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 
0 : 1 << 27, &base); if (unlikely(r != 0)) { - radeon_bo_unreserve(rbo); + radeon_bo_unreserve(new_rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); - goto pflip_cleanup; + goto cleanup; } - radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); - radeon_bo_unreserve(rbo); + radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); + radeon_bo_unreserve(new_rbo); if (!ASIC_IS_AVIVO(rdev)) { /* crtc offset is from display base addr not FB location */ @@ -454,41 +523,49 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, } base &= ~7; } + work->base = base; - spin_lock_irqsave(&dev->event_lock, flags); - work->new_crtc_base = base; - spin_unlock_irqrestore(&dev->event_lock, flags); - - /* update crtc fb */ - crtc->fb = fb; - - r = drm_vblank_get(dev, radeon_crtc->crtc_id); + r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); if (r) { DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup1; + goto pflip_cleanup; } - /* set the proper interrupt */ - radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); + /* We borrow the event spin lock for protecting flip_work */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + r = -EBUSY; + goto vblank_cleanup; + } + radeon_crtc->flip_status = RADEON_FLIP_PENDING; + radeon_crtc->flip_work = work; + + /* update crtc fb */ + crtc->primary->fb = fb; + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + queue_work(radeon_crtc->flip_queue, &work->flip_work); return 0; -pflip_cleanup1: - if (unlikely(radeon_bo_reserve(rbo, false) != 0)) { +vblank_cleanup: + drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); + +pflip_cleanup: + if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); - goto pflip_cleanup; + goto cleanup; } - if (unlikely(radeon_bo_unpin(rbo) != 0)) { + if (unlikely(radeon_bo_unpin(new_rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } - radeon_bo_unreserve(rbo); + radeon_bo_unreserve(new_rbo); -pflip_cleanup: - spin_lock_irqsave(&dev->event_lock, flags); - radeon_crtc->unpin_work = NULL; -unlock_free: - spin_unlock_irqrestore(&dev->event_lock, flags); - drm_gem_object_unreference_unlocked(old_radeon_fb->obj); +cleanup: + drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); radeon_fence_unref(&work->fence); kfree(work); @@ -562,6 +639,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; + radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc"); rdev->mode_info.crtcs[index] = radeon_crtc; if (rdev->family >= CHIP_BONAIRE) { @@ -571,6 +649,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_crtc->max_cursor_width = CURSOR_WIDTH; radeon_crtc->max_cursor_height = CURSOR_HEIGHT; } + dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; + dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; #if 0 radeon_crtc->mode_set.crtc = &radeon_crtc->base; @@ -653,7 +733,7 @@ static void radeon_print_display_setup(struct drm_device *dev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); DRM_INFO("Connector %d:\n", i); - DRM_INFO(" %s\n", drm_get_connector_name(connector)); + DRM_INFO(" %s\n", connector->name); if 
(radeon_connector->hpd.hpd != RADEON_HPD_NONE) DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); if (radeon_connector->ddc_bus) { @@ -749,25 +829,28 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) struct radeon_device *rdev = dev->dev_private; int ret = 0; + /* don't leak the edid if we already fetched it in detect() */ + if (radeon_connector->edid) + goto got_edid; + /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != ENCODER_OBJECT_ID_NONE) { - struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - - if (dig->dp_i2c_bus) + if (radeon_connector->ddc_bus->has_aux) radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &dig->dp_i2c_bus->adapter); + &radeon_connector->ddc_bus->aux.ddc); } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || - dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) + dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && + radeon_connector->ddc_bus->has_aux) radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &dig->dp_i2c_bus->adapter); + &radeon_connector->ddc_bus->aux.ddc); else if (radeon_connector->ddc_bus && !radeon_connector->edid) radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); @@ -788,8 +871,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); } if (radeon_connector->edid) { +got_edid: drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); + drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); return ret; } drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); @@ -797,66 +882,89 @@ } /* avivo */ -static void avivo_get_fb_div(struct radeon_pll *pll, - u32 target_clock, - u32 post_div, - u32 ref_div, - u32 *fb_div, - u32 *frac_fb_div) -{ - u32 tmp = post_div * ref_div; - tmp *= target_clock; - *fb_div = tmp / pll->reference_freq; - *frac_fb_div = tmp % pll->reference_freq; +/** + * avivo_reduce_ratio - fractional number reduction + * + * @nom: numerator + * @den: denominator + * @nom_min: minimum value for numerator + * @den_min: minimum value for denominator + * + * Find the greatest common divisor and apply it to both numerator and + * denominator, but make sure the numerator and denominator are at least + * as large as their minimum values.
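avivo_reduce_ratio() below first divides both terms by their greatest common divisor and then scales the ratio back up until each term reaches its minimum. A self-contained demonstration of the same steps (standalone demo, not from the patch):

#include <stdio.h>

static unsigned gcd_u(unsigned a, unsigned b)
{
	while (b) {
		unsigned t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* Reduce 108000/27000 with minimums 16 and 2: the gcd collapses it to
 * 4/1, the minimum check scales it to 16/4, preserving the exact ratio
 * just as avivo_reduce_ratio() does. */
int main(void)
{
	unsigned nom = 108000, den = 27000, nom_min = 16, den_min = 2;
	unsigned g = gcd_u(nom, den);

	nom /= g;
	den /= g;
	if (nom < nom_min) {
		unsigned s = (nom_min + nom - 1) / nom;	/* DIV_ROUND_UP */
		nom *= s;
		den *= s;
	}
	if (den < den_min) {
		unsigned s = (den_min + den - 1) / den;
		nom *= s;
		den *= s;
	}
	printf("%u/%u\n", nom, den);	/* prints "16/4" */
	return 0;
}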
+ */ +static void avivo_reduce_ratio(unsigned *nom, unsigned *den, + unsigned nom_min, unsigned den_min) +{ + unsigned tmp; + + /* reduce the numbers to a simpler ratio */ + tmp = gcd(*nom, *den); + *nom /= tmp; + *den /= tmp; + + /* make sure the numerator is large enough */ + if (*nom < nom_min) { + tmp = DIV_ROUND_UP(nom_min, *nom); + *nom *= tmp; + *den *= tmp; + } - if (*fb_div > pll->max_feedback_div) - *fb_div = pll->max_feedback_div; - else if (*fb_div < pll->min_feedback_div) - *fb_div = pll->min_feedback_div; + /* make sure the denominator is large enough */ + if (*den < den_min) { + tmp = DIV_ROUND_UP(den_min, *den); + *nom *= tmp; + *den *= tmp; + } } -static u32 avivo_get_post_div(struct radeon_pll *pll, - u32 target_clock) +/** + * avivo_get_fb_ref_div - feedback and ref divider calculation + * + * @nom: numerator + * @den: denominator + * @post_div: post divider + * @fb_div_max: feedback divider maximum + * @ref_div_max: reference divider maximum + * @fb_div: resulting feedback divider + * @ref_div: resulting reference divider + * + * Calculate feedback and reference divider for a given post divider. Makes + * sure we stay within the limits. + */ +static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, + unsigned fb_div_max, unsigned ref_div_max, + unsigned *fb_div, unsigned *ref_div) { - u32 vco, post_div, tmp; - - if (pll->flags & RADEON_PLL_USE_POST_DIV) - return pll->post_div; + /* limit reference * post divider to a maximum */ + ref_div_max = max(min(100 / post_div, ref_div_max), 1u); - if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { - if (pll->flags & RADEON_PLL_IS_LCD) - vco = pll->lcd_pll_out_min; - else - vco = pll->pll_out_min; - } else { - if (pll->flags & RADEON_PLL_IS_LCD) - vco = pll->lcd_pll_out_max; - else - vco = pll->pll_out_max; - } - - post_div = vco / target_clock; - tmp = vco % target_clock; + /* get matching reference and feedback divider */ + *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); + *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); - if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) { - if (tmp) - post_div++; - } else { - if (!tmp) - post_div--; + /* limit fb divider to its maximum */ + if (*fb_div > fb_div_max) { + *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div); + *fb_div = fb_div_max; } - - if (post_div > pll->max_post_div) - post_div = pll->max_post_div; - else if (post_div < pll->min_post_div) - post_div = pll->min_post_div; - - return post_div; } -#define MAX_TOLERANCE 10 - +/** + * radeon_compute_pll_avivo - compute PLL parameters + * + * @pll: information about the PLL + * @dot_clock_p: resulting pixel clock + * @fb_div_p: resulting feedback divider + * @frac_fb_div_p: fractional part of the feedback divider + * @ref_div_p: resulting reference divider + * @post_div_p: resulting post divider + * + * Try to calculate the PLL parameters to generate the given frequency: + * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div) + */ void radeon_compute_pll_avivo(struct radeon_pll *pll, u32 freq, u32 *dot_clock_p, @@ -865,53 +973,135 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, u32 *ref_div_p, u32 *post_div_p) { - u32 target_clock = freq / 10; - u32 post_div = avivo_get_post_div(pll, target_clock); - u32 ref_div = pll->min_ref_div; - u32 fb_div = 0, frac_fb_div = 0, tmp; + unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
+ freq : freq / 10; - if (pll->flags & RADEON_PLL_USE_REF_DIV) - ref_div = pll->reference_div; + unsigned fb_div_min, fb_div_max, fb_div; + unsigned post_div_min, post_div_max, post_div; + unsigned ref_div_min, ref_div_max, ref_div; + unsigned post_div_best, diff_best; + unsigned nom, den; + + /* determine allowed feedback divider range */ + fb_div_min = pll->min_feedback_div; + fb_div_max = pll->max_feedback_div; if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { - avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div); - frac_fb_div = (100 * frac_fb_div) / pll->reference_freq; - if (frac_fb_div >= 5) { - frac_fb_div -= 5; - frac_fb_div = frac_fb_div / 10; - frac_fb_div++; + fb_div_min *= 10; + fb_div_max *= 10; + } + + /* determine allowed ref divider range */ + if (pll->flags & RADEON_PLL_USE_REF_DIV) + ref_div_min = pll->reference_div; + else + ref_div_min = pll->min_ref_div; + + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && + pll->flags & RADEON_PLL_USE_REF_DIV) + ref_div_max = pll->reference_div; + else + ref_div_max = pll->max_ref_div; + + /* determine allowed post divider range */ + if (pll->flags & RADEON_PLL_USE_POST_DIV) { + post_div_min = pll->post_div; + post_div_max = pll->post_div; + } else { + unsigned vco_min, vco_max; + + if (pll->flags & RADEON_PLL_IS_LCD) { + vco_min = pll->lcd_pll_out_min; + vco_max = pll->lcd_pll_out_max; + } else { + vco_min = pll->pll_out_min; + vco_max = pll->pll_out_max; } - if (frac_fb_div >= 10) { - fb_div++; - frac_fb_div = 0; + + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { + vco_min *= 10; + vco_max *= 10; } - } else { - while (ref_div <= pll->max_ref_div) { - avivo_get_fb_div(pll, target_clock, post_div, ref_div, - &fb_div, &frac_fb_div); - if (frac_fb_div >= (pll->reference_freq / 2)) - fb_div++; - frac_fb_div = 0; - tmp = (pll->reference_freq * fb_div) / (post_div * ref_div); - tmp = (tmp * 10000) / target_clock; - - if (tmp > (10000 + MAX_TOLERANCE)) - ref_div++; - else if (tmp >= (10000 - MAX_TOLERANCE)) - break; - else - ref_div++; + + post_div_min = vco_min / target_clock; + if ((target_clock * post_div_min) < vco_min) + ++post_div_min; + if (post_div_min < pll->min_post_div) + post_div_min = pll->min_post_div; + + post_div_max = vco_max / target_clock; + if ((target_clock * post_div_max) > vco_max) + --post_div_max; + if (post_div_max > pll->max_post_div) + post_div_max = pll->max_post_div; + } + + /* represent the searched ratio as fractional number */ + nom = target_clock; + den = pll->reference_freq; + + /* reduce the numbers to a simpler ratio */ + avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min); + + /* now search for a post divider */ + if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) + post_div_best = post_div_min; + else + post_div_best = post_div_max; + diff_best = ~0; + + for (post_div = post_div_min; post_div <= post_div_max; ++post_div) { + unsigned diff; + avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, + ref_div_max, &fb_div, &ref_div); + diff = abs(target_clock - (pll->reference_freq * fb_div) / + (ref_div * post_div)); + + if (diff < diff_best || (diff == diff_best && + !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) { + + post_div_best = post_div; + diff_best = diff; + } + } + post_div = post_div_best; + + /* get the feedback and reference divider for the optimal value */ + avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max, + &fb_div, &ref_div); + + /* reduce the numbers to a simpler ratio once more */ + /* this also makes sure that the reference divider is large 
enough */ + avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); + + /* avoid high jitter with small fractional dividers */ + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { + fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); + if (fb_div < fb_div_min) { + unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); + fb_div *= tmp; + ref_div *= tmp; } } - *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) / - (ref_div * post_div * 10); - *fb_div_p = fb_div; - *frac_fb_div_p = frac_fb_div; + /* and finally save the result */ + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { + *fb_div_p = fb_div / 10; + *frac_fb_div_p = fb_div % 10; + } else { + *fb_div_p = fb_div; + *frac_fb_div_p = 0; + } + + *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) + + (pll->reference_freq * *frac_fb_div_p)) / + (ref_div * post_div * 10); *ref_div_p = ref_div; *post_div_p = post_div; - DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n", - *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div); + + DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n", + freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, + ref_div, post_div); } /* pre-avivo */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index ec8c388eec1..e9e36108424 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -78,9 +78,13 @@ * 2.34.0 - Add CIK tiling mode array query * 2.35.0 - Add CIK macrotile mode array query * 2.36.0 - Fix CIK DCE tiling setup + * 2.37.0 - allow GS ring setup on r6xx/r7xx + * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN), + * CIK: 1D and linear tiling modes contain valid PIPE_CONFIG + * 2.39.0 - Add INFO query for number of active CUs */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 36 +#define KMS_DRIVER_MINOR 39 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -112,6 +116,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime); +extern bool radeon_is_px(struct drm_device *dev); extern const struct drm_ioctl_desc radeon_ioctls_kms[]; extern int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); @@ -141,11 +146,9 @@ void radeon_debugfs_cleanup(struct drm_minor *minor); #if defined(CONFIG_VGA_SWITCHEROO) void radeon_register_atpx_handler(void); void radeon_unregister_atpx_handler(void); -bool radeon_is_px(void); #else static inline void radeon_register_atpx_handler(void) {} static inline void radeon_unregister_atpx_handler(void) {} -static inline bool radeon_is_px(void) { return false; } #endif int radeon_no_wb; @@ -170,6 +173,9 @@ int radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; int radeon_hard_reset = 0; +int radeon_vm_size = 4; +int radeon_vm_block_size = 9; +int radeon_deep_color = 0; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -183,7 +189,7 @@ module_param_named(dynclks, radeon_dynclks, int, 0444); MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx"); module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444); -MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing"); +MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, 
radeon_vram_limit, int, 0600); MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)"); @@ -237,6 +243,15 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444); MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); module_param_named(hard_reset, radeon_hard_reset, int, 0444); +MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)"); +module_param_named(vm_size, radeon_vm_size, int, 0444); + +MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); +module_param_named(vm_block_size, radeon_vm_block_size, int, 0444); + +MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); +module_param_named(deep_color, radeon_deep_color, int, 0444); + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; @@ -402,11 +417,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (radeon_runtime_pm == 0) - return -EINVAL; - - if (radeon_runtime_pm == -1 && !radeon_is_px()) - return -EINVAL; + if (!radeon_is_px(drm_dev)) { + pm_runtime_forbid(dev); + return -EBUSY; + } drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); @@ -427,10 +441,7 @@ static int radeon_pmops_runtime_resume(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (radeon_runtime_pm == 0) - return -EINVAL; - - if (radeon_runtime_pm == -1 && !radeon_is_px()) + if (!radeon_is_px(drm_dev)) return -EINVAL; drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -455,12 +466,8 @@ static int radeon_pmops_runtime_idle(struct device *dev) struct drm_device *drm_dev = pci_get_drvdata(pdev); struct drm_crtc *crtc; - if (radeon_runtime_pm == 0) - return -EBUSY; - - /* are we PX enabled? */ - if (radeon_runtime_pm == -1 && !radeon_is_px()) { - DRM_DEBUG_DRIVER("failing to power off - not px\n"); + if (!radeon_is_px(drm_dev)) { + pm_runtime_forbid(dev); return -EBUSY; } @@ -525,7 +532,6 @@ static struct drm_driver kms_driver = { DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, - .dev_priv_size = 0, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .preclose = radeon_driver_preclose_kms, diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 614ad549297..4b7b87f71a6 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -97,6 +97,7 @@ enum radeon_family { CHIP_KAVERI, CHIP_KABINI, CHIP_HAWAII, + CHIP_MULLINS, CHIP_LAST, }; @@ -115,6 +116,7 @@ enum radeon_chip_flags { RADEON_NEW_MEMMAP = 0x00400000UL, RADEON_IS_PCI = 0x00800000UL, RADEON_IS_IGPGART = 0x01000000UL, + RADEON_IS_PX = 0x02000000UL, }; #endif diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index c37cb79a948..913787085df 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -288,7 +288,6 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) * @rdev: radeon device pointer * @target_seq: sequence number(s) we want to wait for * @intr: use interruptible sleep - * @lock_ring: whether the ring should be locked or not * * Wait for the requested sequence number(s) to be written by any ring * (all asics). Sequence number array is indexed by ring id.
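The per-ring sequence array convention is easy to miss: a zero entry means "do not wait on this ring". A sketch of how a caller fills it (illustrative only, mirroring radeon_fence_wait() below; radeon_fence_wait_seq() is static to radeon_fence.c, so this would live in the same file):

/* Wait for sequence number seq_nr on the GFX ring and ignore every
 * other ring by leaving its slot zeroed. */
static int wait_gfx_seq(struct radeon_device *rdev, uint64_t seq_nr)
{
	uint64_t target_seq[RADEON_NUM_RINGS] = {};

	target_seq[RADEON_RING_TYPE_GFX_INDEX] = seq_nr;
	return radeon_fence_wait_seq(rdev, target_seq, false);
}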
@@ -299,7 +298,7 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) * -EDEADLK is returned when a GPU lockup has been detected. */ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, - bool intr, bool lock_ring) + bool intr) { uint64_t last_seq[RADEON_NUM_RINGS]; bool signaled; @@ -358,9 +357,6 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, if (i != RADEON_NUM_RINGS) continue; - if (lock_ring) - mutex_lock(&rdev->ring_lock); - for (i = 0; i < RADEON_NUM_RINGS; ++i) { if (!target_seq[i]) continue; @@ -378,14 +374,9 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, /* remember that we need a reset */ rdev->needs_reset = true; - if (lock_ring) - mutex_unlock(&rdev->ring_lock); wake_up_all(&rdev->fence_queue); return -EDEADLK; } - - if (lock_ring) - mutex_unlock(&rdev->ring_lock); } } return 0; @@ -416,7 +407,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) return 0; - r = radeon_fence_wait_seq(fence->rdev, seq, intr, true); + r = radeon_fence_wait_seq(fence->rdev, seq, intr); if (r) return r; @@ -464,7 +455,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, if (num_rings == 0) return -ENOENT; - r = radeon_fence_wait_seq(rdev, seq, intr, true); + r = radeon_fence_wait_seq(rdev, seq, intr); if (r) { return r; } @@ -472,37 +463,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, } /** - * radeon_fence_wait_locked - wait for a fence to signal - * - * @fence: radeon fence object - * - * Wait for the requested fence to signal (all asics). - * Returns 0 if the fence has passed, error for all other cases. - */ -int radeon_fence_wait_locked(struct radeon_fence *fence) -{ - uint64_t seq[RADEON_NUM_RINGS] = {}; - int r; - - if (fence == NULL) { - WARN(1, "Querying an invalid fence : %p !\n", fence); - return -EINVAL; - } - - seq[fence->ring] = fence->seq; - if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) - return 0; - - r = radeon_fence_wait_seq(fence->rdev, seq, false, false); - if (r) - return r; - - fence->seq = RADEON_FENCE_SIGNALED_SEQ; - return 0; -} - -/** - * radeon_fence_wait_next_locked - wait for the next fence to signal + * radeon_fence_wait_next - wait for the next fence to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with @@ -511,7 +472,7 @@ int radeon_fence_wait_locked(struct radeon_fence *fence) * Returns 0 if the next fence has passed, error for all other cases. * Caller must hold ring lock. */ -int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) +int radeon_fence_wait_next(struct radeon_device *rdev, int ring) { uint64_t seq[RADEON_NUM_RINGS] = {}; @@ -521,11 +482,11 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) already the last emitted fence */ return -ENOENT; } - return radeon_fence_wait_seq(rdev, seq, false, false); + return radeon_fence_wait_seq(rdev, seq, false); } /** - * radeon_fence_wait_empty_locked - wait for all fences to signal + * radeon_fence_wait_empty - wait for all fences to signal * * @rdev: radeon device pointer * @ring: ring index the fence is associated with @@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) * Returns 0 if the fences have passed, error for all other cases. * Caller must hold ring lock.
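With the _locked suffix gone and the internal ring_lock handling removed, callers such as the suspend path shown earlier simply iterate over all rings. A condensed sketch of that pattern (illustration, not part of the patch):

/* Drain every ring, forcing completion afterwards if any wait failed;
 * this matches what radeon_suspend_kms() and radeon_fence_driver_fini()
 * do with the renamed radeon_fence_wait_empty(). */
static void drain_all_rings(struct radeon_device *rdev)
{
	bool force = false;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; i++)
		if (radeon_fence_wait_empty(rdev, i))
			force = true;
	if (force)
		radeon_fence_driver_force_completion(rdev);
}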
@@ -534,7 +495,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) * Returns 0 if the fences have passed, error for all other cases. * Caller must hold ring lock. */ -int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) +int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) { uint64_t seq[RADEON_NUM_RINGS] = {}; int r; @@ -543,7 +504,7 @@ if (!seq[ring]) return 0; - r = radeon_fence_wait_seq(rdev, seq, false, false); + r = radeon_fence_wait_seq(rdev, seq, false); if (r) { if (r == -EDEADLK) return -EDEADLK; @@ -794,7 +755,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev) for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { if (!rdev->fence_drv[ring].initialized) continue; - r = radeon_fence_wait_empty_locked(rdev, ring); + r = radeon_fence_wait_empty(rdev, ring); if (r) { /* no need to trigger GPU reset as we are unloading */ radeon_fence_driver_force_completion(rdev); @@ -858,15 +819,35 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) return 0; } +/** + * radeon_debugfs_gpu_reset - manually trigger a gpu reset + * + * Manually trigger a gpu reset at the next fence wait. + */ +static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + + down_read(&rdev->exclusive_lock); + seq_printf(m, "%d\n", rdev->needs_reset); + rdev->needs_reset = true; + up_read(&rdev->exclusive_lock); + + return 0; +} + static struct drm_info_list radeon_debugfs_fence_list[] = { {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL}, + {"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL} }; #endif int radeon_debugfs_fence_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) - return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1); + return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2); #else return 0; #endif diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a8f9b463bf2..2e723651069 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -28,8 +28,6 @@ #include <drm/drmP.h> #include <drm/radeon_drm.h> #include "radeon.h" -#include "radeon_reg.h" -#include "radeon_trace.h" /* * GART @@ -394,959 +392,3 @@ void radeon_gart_fini(struct radeon_device *rdev) radeon_dummy_page_fini(rdev); } - -/* - * GPUVM - * GPUVM is similar to the legacy gart on older asics, however - * rather than there being a single global gart table - * for the entire GPU, there are multiple VM page tables active - * at any given time. The VM page tables can contain a mix of - * vram pages and system memory pages, and system memory pages - * can be mapped as snooped (cached system pages) or unsnooped - * (uncached system pages). - * Each VM has an ID associated with it and there is a page table - * associated with each VMID. When executing a command buffer, - * the kernel tells the ring what VMID to use for that command - * buffer. VMIDs are allocated dynamically as commands are submitted. - * The userspace drivers maintain their own address space and the kernel - * sets up their page tables accordingly when they submit their - * command buffers and a VMID is assigned. - * Cayman/Trinity support up to 8 active VMs at any given time; - * SI supports 16. - */
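Some concrete numbers for the scheme described in the comment above, using the defaults exposed by the new vm_size (4 GB) and vm_block_size (9 bits) module parameters and the usual 4 KB GPU page size. These figures are worked arithmetic for illustration, not values quoted from the patch:

	max_pfn        = 4 GiB / 4 KiB  = 1,048,576 pages
	PTEs per table = 1 << 9         = 512, i.e. each page table maps 2 MiB
	PDEs           = 1,048,576 >> 9 = 2,048 page tables
	directory size = 2,048 * 8 B    = 16 KiB (already page aligned)

These follow directly from radeon_vm_num_pdes() and radeon_vm_directory_size() below, with RADEON_VM_BLOCK_SIZE playing the role that vm_block_size makes tunable.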
- -/* - * vm helpers - * - * TODO bind a default page at vm initialization for default address - */ - -/** - * radeon_vm_num_pdes - return the number of page directory entries - * - * @rdev: radeon_device pointer - * - * Calculate the number of page directory entries (cayman+). - */ -static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) -{ - return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE; -} - -/** - * radeon_vm_directory_size - returns the size of the page directory in bytes - * - * @rdev: radeon_device pointer - * - * Calculate the size of the page directory in bytes (cayman+). - */ -static unsigned radeon_vm_directory_size(struct radeon_device *rdev) -{ - return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); -} - -/** - * radeon_vm_manager_init - init the vm manager - * - * @rdev: radeon_device pointer - * - * Init the vm manager (cayman+). - * Returns 0 for success, error for failure. - */ -int radeon_vm_manager_init(struct radeon_device *rdev) -{ - struct radeon_vm *vm; - struct radeon_bo_va *bo_va; - int r; - unsigned size; - - if (!rdev->vm_manager.enabled) { - /* allocate enough for 2 full VM pts */ - size = radeon_vm_directory_size(rdev); - size += rdev->vm_manager.max_pfn * 8; - size *= 2; - r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, - RADEON_GPU_PAGE_ALIGN(size), - RADEON_VM_PTB_ALIGN_SIZE, - RADEON_GEM_DOMAIN_VRAM); - if (r) { - dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", - (rdev->vm_manager.max_pfn * 8) >> 10); - return r; - } - - r = radeon_asic_vm_init(rdev); - if (r) - return r; - - rdev->vm_manager.enabled = true; - - r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); - if (r) - return r; - } - - /* restore page table */ - list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { - if (vm->page_directory == NULL) - continue; - - list_for_each_entry(bo_va, &vm->va, vm_list) { - bo_va->valid = false; - } - } - return 0; -} - -/** - * radeon_vm_free_pt - free the page table for a specific vm - * - * @rdev: radeon_device pointer - * @vm: vm to unbind - * - * Free the page table of a specific vm (cayman+). - * - * Global and local mutex must be locked! - */ -static void radeon_vm_free_pt(struct radeon_device *rdev, - struct radeon_vm *vm) -{ - struct radeon_bo_va *bo_va; - int i; - - if (!vm->page_directory) - return; - - list_del_init(&vm->list); - radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); - - list_for_each_entry(bo_va, &vm->va, vm_list) { - bo_va->valid = false; - } - - if (vm->page_tables == NULL) - return; - - for (i = 0; i < radeon_vm_num_pdes(rdev); i++) - radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence); - - kfree(vm->page_tables); -} - -/** - * radeon_vm_manager_fini - tear down the vm manager - * - * @rdev: radeon_device pointer - * - * Tear down the VM manager (cayman+).
- */ -void radeon_vm_manager_fini(struct radeon_device *rdev) -{ - struct radeon_vm *vm, *tmp; - int i; - - if (!rdev->vm_manager.enabled) - return; - - mutex_lock(&rdev->vm_manager.lock); - /* free all allocated page tables */ - list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { - mutex_lock(&vm->mutex); - radeon_vm_free_pt(rdev, vm); - mutex_unlock(&vm->mutex); - } - for (i = 0; i < RADEON_NUM_VM; ++i) { - radeon_fence_unref(&rdev->vm_manager.active[i]); - } - radeon_asic_vm_fini(rdev); - mutex_unlock(&rdev->vm_manager.lock); - - radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); - radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager); - rdev->vm_manager.enabled = false; -} - -/** - * radeon_vm_evict - evict page table to make room for new one - * - * @rdev: radeon_device pointer - * @vm: VM we want to allocate something for - * - * Evict a VM from the lru, making sure that it isn't @vm. (cayman+). - * Returns 0 for success, -ENOMEM for failure. - * - * Global and local mutex must be locked! - */ -static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm) -{ - struct radeon_vm *vm_evict; - - if (list_empty(&rdev->vm_manager.lru_vm)) - return -ENOMEM; - - vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, - struct radeon_vm, list); - if (vm_evict == vm) - return -ENOMEM; - - mutex_lock(&vm_evict->mutex); - radeon_vm_free_pt(rdev, vm_evict); - mutex_unlock(&vm_evict->mutex); - return 0; -} - -/** - * radeon_vm_alloc_pt - allocates a page table for a VM - * - * @rdev: radeon_device pointer - * @vm: vm to bind - * - * Allocate a page table for the requested vm (cayman+). - * Returns 0 for success, error for failure. - * - * Global and local mutex must be locked! - */ -int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) -{ - unsigned pd_size, pd_entries, pts_size; - struct radeon_ib ib; - int r; - - if (vm == NULL) { - return -EINVAL; - } - - if (vm->page_directory != NULL) { - return 0; - } - - pd_size = radeon_vm_directory_size(rdev); - pd_entries = radeon_vm_num_pdes(rdev); - -retry: - r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, - &vm->page_directory, pd_size, - RADEON_VM_PTB_ALIGN_SIZE, false); - if (r == -ENOMEM) { - r = radeon_vm_evict(rdev, vm); - if (r) - return r; - goto retry; - - } else if (r) { - return r; - } - - vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory); - - /* Initially clear the page directory */ - r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, - NULL, pd_entries * 2 + 64); - if (r) { - radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); - return r; - } - - ib.length_dw = 0; - - radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr, - 0, pd_entries, 0, 0); - - radeon_semaphore_sync_to(ib.semaphore, vm->fence); - r = radeon_ib_schedule(rdev, &ib, NULL); - if (r) { - radeon_ib_free(rdev, &ib); - radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); - return r; - } - radeon_fence_unref(&vm->fence); - vm->fence = radeon_fence_ref(ib.fence); - radeon_ib_free(rdev, &ib); - radeon_fence_unref(&vm->last_flush); - - /* allocate page table array */ - pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *); - vm->page_tables = kzalloc(pts_size, GFP_KERNEL); - - if (vm->page_tables == NULL) { - DRM_ERROR("Cannot allocate memory for page table array\n"); - radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence); - return -ENOMEM; - } - - return 0; -} - -/** - * radeon_vm_add_to_lru - add VMs page table to LRU list - * - * @rdev: radeon_device pointer - * 
@vm: vm to add to LRU - * - * Add the allocated page table to the LRU list (cayman+). - * - * Global mutex must be locked! - */ -void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm) -{ - list_del_init(&vm->list); - list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); -} - -/** - * radeon_vm_grab_id - allocate the next free VMID - * - * @rdev: radeon_device pointer - * @vm: vm to allocate id for - * @ring: ring we want to submit job to - * - * Allocate an id for the vm (cayman+). - * Returns the fence we need to sync to (if any). - * - * Global and local mutex must be locked! - */ -struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, - struct radeon_vm *vm, int ring) -{ - struct radeon_fence *best[RADEON_NUM_RINGS] = {}; - unsigned choices[2] = {}; - unsigned i; - - /* check if the id is still valid */ - if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) - return NULL; - - /* we definitely need to flush */ - radeon_fence_unref(&vm->last_flush); - - /* skip over VMID 0, since it is the system VM */ - for (i = 1; i < rdev->vm_manager.nvm; ++i) { - struct radeon_fence *fence = rdev->vm_manager.active[i]; - - if (fence == NULL) { - /* found a free one */ - vm->id = i; - trace_radeon_vm_grab_id(vm->id, ring); - return NULL; - } - - if (radeon_fence_is_earlier(fence, best[fence->ring])) { - best[fence->ring] = fence; - choices[fence->ring == ring ? 0 : 1] = i; - } - } - - for (i = 0; i < 2; ++i) { - if (choices[i]) { - vm->id = choices[i]; - trace_radeon_vm_grab_id(vm->id, ring); - return rdev->vm_manager.active[choices[i]]; - } - } - - /* should never happen */ - BUG(); - return NULL; -} - -/** - * radeon_vm_fence - remember fence for vm - * - * @rdev: radeon_device pointer - * @vm: vm we want to fence - * @fence: fence to remember - * - * Fence the vm (cayman+). - * Set the fence used to protect page table and id. - * - * Global and local mutex must be locked! - */ -void radeon_vm_fence(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_fence *fence) -{ - radeon_fence_unref(&rdev->vm_manager.active[vm->id]); - rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); - - radeon_fence_unref(&vm->fence); - vm->fence = radeon_fence_ref(fence); - - radeon_fence_unref(&vm->last_id_use); - vm->last_id_use = radeon_fence_ref(fence); -} - -/** - * radeon_vm_bo_find - find the bo_va for a specific vm & bo - * - * @vm: requested vm - * @bo: requested buffer object - * - * Find @bo inside the requested vm (cayman+). - * Search inside the @bo's vm list for the requested vm - * Returns the found bo_va or NULL if none is found - * - * Object has to be reserved! - */ -struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, - struct radeon_bo *bo) -{ - struct radeon_bo_va *bo_va; - - list_for_each_entry(bo_va, &bo->va, bo_list) { - if (bo_va->vm == vm) { - return bo_va; - } - } - return NULL; -}
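The helpers above and below form the per-BO mapping API that the rest of this series consumes (see the radeon_kms.c hunks later in this diff, where the IB pool BO is mapped into each client's VM). The intended lifecycle, sketched from those call sites rather than copied from any single one:

	/* attach the BO to a VM, then give it a virtual address
	 * (the BO must be reserved around these calls) */
	bo_va = radeon_vm_bo_add(rdev, vm, bo);
	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
				  RADEON_VM_PAGE_READABLE |
				  RADEON_VM_PAGE_SNOOPED);
	...
	/* on teardown, drop the mapping again */
	radeon_vm_bo_rmv(rdev, bo_va);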
- -/** - * radeon_vm_bo_add - add a bo to a specific vm - * - * @rdev: radeon_device pointer - * @vm: requested vm - * @bo: radeon buffer object - * - * Add @bo into the requested vm (cayman+). - * Add @bo to the list of bos associated with the vm - * Returns newly added bo_va or NULL for failure - * - * Object has to be reserved! - */ -struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo) -{ - struct radeon_bo_va *bo_va; - - bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); - if (bo_va == NULL) { - return NULL; - } - bo_va->vm = vm; - bo_va->bo = bo; - bo_va->soffset = 0; - bo_va->eoffset = 0; - bo_va->flags = 0; - bo_va->valid = false; - bo_va->ref_count = 1; - INIT_LIST_HEAD(&bo_va->bo_list); - INIT_LIST_HEAD(&bo_va->vm_list); - - mutex_lock(&vm->mutex); - list_add(&bo_va->vm_list, &vm->va); - list_add_tail(&bo_va->bo_list, &bo->va); - mutex_unlock(&vm->mutex); - - return bo_va; -} - -/** - * radeon_vm_bo_set_addr - set bo's virtual address inside a vm - * - * @rdev: radeon_device pointer - * @bo_va: bo_va to store the address - * @soffset: requested offset of the buffer in the VM address space - * @flags: attributes of pages (read/write/valid/etc.) - * - * Set offset of @bo_va (cayman+). - * Validate and set the offset requested within the vm address space. - * Returns 0 for success, error for failure. - * - * Object has to be reserved! - */ -int radeon_vm_bo_set_addr(struct radeon_device *rdev, - struct radeon_bo_va *bo_va, - uint64_t soffset, - uint32_t flags) -{ - uint64_t size = radeon_bo_size(bo_va->bo); - uint64_t eoffset, last_offset = 0; - struct radeon_vm *vm = bo_va->vm; - struct radeon_bo_va *tmp; - struct list_head *head; - unsigned last_pfn; - - if (soffset) { - /* make sure object fits at this offset */ - eoffset = soffset + size; - if (soffset >= eoffset) { - return -EINVAL; - } - - last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; - if (last_pfn > rdev->vm_manager.max_pfn) { - dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", - last_pfn, rdev->vm_manager.max_pfn); - return -EINVAL; - } - - } else { - eoffset = last_pfn = 0; - } - - mutex_lock(&vm->mutex); - head = &vm->va; - last_offset = 0; - list_for_each_entry(tmp, &vm->va, vm_list) { - if (bo_va == tmp) { - /* skip over currently modified bo */ - continue; - } - - if (soffset >= last_offset && eoffset <= tmp->soffset) { - /* bo can be added before this one */ - break; - } - if (eoffset > tmp->soffset && soffset < tmp->eoffset) { - /* bo and tmp overlap, invalid offset */ - dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", - bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, - (unsigned)tmp->soffset, (unsigned)tmp->eoffset); - mutex_unlock(&vm->mutex); - return -EINVAL; - } - last_offset = tmp->eoffset; - head = &tmp->vm_list; - } - - bo_va->soffset = soffset; - bo_va->eoffset = eoffset; - bo_va->flags = flags; - bo_va->valid = false; - list_move(&bo_va->vm_list, head); - - mutex_unlock(&vm->mutex); - return 0; -} - -/** - * radeon_vm_map_gart - get the physical address of a gart page - * - * @rdev: radeon_device pointer - * @addr: the unmapped addr - * - * Look up the physical address of the page that the pte resolves - * to (cayman+). - * Returns the physical address of the page. - */ -uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) -{ - uint64_t result; - - /* page table offset */ - result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size */ - result |= addr & (~PAGE_MASK); - - return result; -} - -/** - * radeon_vm_page_flags - translate page flags to what the hw uses - * - * @flags: flags coming from userspace - * - * Translate the flags the userspace ABI uses to hw flags.
- */ -static uint32_t radeon_vm_page_flags(uint32_t flags) -{ - uint32_t hw_flags = 0; - hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; - hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; - hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; - if (flags & RADEON_VM_PAGE_SYSTEM) { - hw_flags |= R600_PTE_SYSTEM; - hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; - } - return hw_flags; -} - -/** - * radeon_vm_update_pdes - make sure that page directory is valid - * - * @rdev: radeon_device pointer - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * - * Allocates new page tables if necessary - * and updates the page directory (cayman+). - * Returns 0 for success, error for failure. - * - * Global and local mutex must be locked! - */ -static int radeon_vm_update_pdes(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_ib *ib, - uint64_t start, uint64_t end) -{ - static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; - - uint64_t last_pde = ~0, last_pt = ~0; - unsigned count = 0; - uint64_t pt_idx; - int r; - - start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; - end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; - - /* walk over the address space and update the page directory */ - for (pt_idx = start; pt_idx <= end; ++pt_idx) { - uint64_t pde, pt; - - if (vm->page_tables[pt_idx]) - continue; - -retry: - r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, - &vm->page_tables[pt_idx], - RADEON_VM_PTE_COUNT * 8, - RADEON_GPU_PAGE_SIZE, false); - - if (r == -ENOMEM) { - r = radeon_vm_evict(rdev, vm); - if (r) - return r; - goto retry; - } else if (r) { - return r; - } - - pde = vm->pd_gpu_addr + pt_idx * 8; - - pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); - - if (((last_pde + 8 * count) != pde) || - ((last_pt + incr * count) != pt)) { - - if (count) { - radeon_asic_vm_set_page(rdev, ib, last_pde, - last_pt, count, incr, - R600_PTE_VALID); - - count *= RADEON_VM_PTE_COUNT; - radeon_asic_vm_set_page(rdev, ib, last_pt, 0, - count, 0, 0); - } - - count = 1; - last_pde = pde; - last_pt = pt; - } else { - ++count; - } - } - - if (count) { - radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, - incr, R600_PTE_VALID); - - count *= RADEON_VM_PTE_COUNT; - radeon_asic_vm_set_page(rdev, ib, last_pt, 0, - count, 0, 0); - } - - return 0; -} - -/** - * radeon_vm_update_ptes - make sure that page tables are valid - * - * @rdev: radeon_device pointer - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * @dst: destination address to map to - * @flags: mapping flags - * - * Update the page tables in the range @start - @end (cayman+). - * - * Global and local mutex must be locked! 
- */ -static void radeon_vm_update_ptes(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_ib *ib, - uint64_t start, uint64_t end, - uint64_t dst, uint32_t flags) -{ - static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; - - uint64_t last_pte = ~0, last_dst = ~0; - unsigned count = 0; - uint64_t addr; - - start = start / RADEON_GPU_PAGE_SIZE; - end = end / RADEON_GPU_PAGE_SIZE; - - /* walk over the address space and update the page tables */ - for (addr = start; addr < end; ) { - uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; - unsigned nptes; - uint64_t pte; - - if ((addr & ~mask) == (end & ~mask)) - nptes = end - addr; - else - nptes = RADEON_VM_PTE_COUNT - (addr & mask); - - pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); - pte += (addr & mask) * 8; - - if ((last_pte + 8 * count) != pte) { - - if (count) { - radeon_asic_vm_set_page(rdev, ib, last_pte, - last_dst, count, - RADEON_GPU_PAGE_SIZE, - flags); - } - - count = nptes; - last_pte = pte; - last_dst = dst; - } else { - count += nptes; - } - - addr += nptes; - dst += nptes * RADEON_GPU_PAGE_SIZE; - } - - if (count) { - radeon_asic_vm_set_page(rdev, ib, last_pte, - last_dst, count, - RADEON_GPU_PAGE_SIZE, flags); - } -} - -/** - * radeon_vm_bo_update - map a bo into the vm page table - * - * @rdev: radeon_device pointer - * @vm: requested vm - * @bo: radeon buffer object - * @mem: ttm mem - * - * Fill in the page table entries for @bo (cayman+). - * Returns 0 for success, -EINVAL for failure. - * - * Object has to be reserved & global and local mutex must be locked! - */ -int radeon_vm_bo_update(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, - struct ttm_mem_reg *mem) -{ - struct radeon_ib ib; - struct radeon_bo_va *bo_va; - unsigned nptes, npdes, ndw; - uint64_t addr; - int r; - - /* nothing to do if vm isn't bound */ - if (vm->page_directory == NULL) - return 0; - - bo_va = radeon_vm_bo_find(vm, bo); - if (bo_va == NULL) { - dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); - return -EINVAL; - } - - if (!bo_va->soffset) { - dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n", - bo, vm); - return -EINVAL; - } - - if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) - return 0; - - bo_va->flags &= ~RADEON_VM_PAGE_VALID; - bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; - if (mem) { - addr = mem->start << PAGE_SHIFT; - if (mem->mem_type != TTM_PL_SYSTEM) { - bo_va->flags |= RADEON_VM_PAGE_VALID; - bo_va->valid = true; - } - if (mem->mem_type == TTM_PL_TT) { - bo_va->flags |= RADEON_VM_PAGE_SYSTEM; - } else { - addr += rdev->vm_manager.vram_base_offset; - } - } else { - addr = 0; - bo_va->valid = false; - } - - trace_radeon_vm_bo_update(bo_va); - - nptes = radeon_bo_ngpu_pages(bo); - - /* assume two extra pdes in case the mapping overlaps the borders */ - npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; - - /* padding, etc.
*/ - ndw = 64; - - if (RADEON_VM_BLOCK_SIZE > 11) - /* reserve space for one header for every 2k dwords */ - ndw += (nptes >> 11) * 4; - else - /* reserve space for one header for - every (1 << BLOCK_SIZE) entries */ - ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; - - /* reserve space for pte addresses */ - ndw += nptes * 2; - - /* reserve space for one header for every 2k dwords */ - ndw += (npdes >> 11) * 4; - - /* reserve space for pde addresses */ - ndw += npdes * 2; - - /* reserve space for clearing new page tables */ - ndw += npdes * 2 * RADEON_VM_PTE_COUNT; - - /* update too big for an IB */ - if (ndw > 0xfffff) - return -ENOMEM; - - r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); - if (r) - return r; - ib.length_dw = 0; - - r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); - if (r) { - radeon_ib_free(rdev, &ib); - return r; - } - - radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, - addr, radeon_vm_page_flags(bo_va->flags)); - - radeon_semaphore_sync_to(ib.semaphore, vm->fence); - r = radeon_ib_schedule(rdev, &ib, NULL); - if (r) { - radeon_ib_free(rdev, &ib); - return r; - } - radeon_fence_unref(&vm->fence); - vm->fence = radeon_fence_ref(ib.fence); - radeon_ib_free(rdev, &ib); - radeon_fence_unref(&vm->last_flush); - - return 0; -} - -/** - * radeon_vm_bo_rmv - remove a bo from a specific vm - * - * @rdev: radeon_device pointer - * @bo_va: requested bo_va - * - * Remove @bo_va->bo from the requested vm (cayman+). - * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and - * remove the ptes for @bo_va in the page table. - * Returns 0 for success. - * - * Object has to be reserved! - */ -int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va) -{ - int r = 0; - - mutex_lock(&rdev->vm_manager.lock); - mutex_lock(&bo_va->vm->mutex); - if (bo_va->soffset) { - r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); - } - mutex_unlock(&rdev->vm_manager.lock); - list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - - kfree(bo_va); - return r; -} - -/** - * radeon_vm_bo_invalidate - mark the bo as invalid - * - * @rdev: radeon_device pointer - * @vm: requested vm - * @bo: radeon buffer object - * - * Mark @bo as invalid (cayman+). - */ -void radeon_vm_bo_invalidate(struct radeon_device *rdev, - struct radeon_bo *bo) -{ - struct radeon_bo_va *bo_va; - - list_for_each_entry(bo_va, &bo->va, bo_list) { - bo_va->valid = false; - } -} - -/** - * radeon_vm_init - initialize a vm instance - * - * @rdev: radeon_device pointer - * @vm: requested vm - * - * Init @vm fields (cayman+). - */ -void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) -{ - vm->id = 0; - vm->fence = NULL; - vm->last_flush = NULL; - vm->last_id_use = NULL; - mutex_init(&vm->mutex); - INIT_LIST_HEAD(&vm->list); - INIT_LIST_HEAD(&vm->va); -} - -/** - * radeon_vm_fini - tear down a vm instance - * - * @rdev: radeon_device pointer - * @vm: requested vm - * - * Tear down @vm (cayman+).
- * Unbind the VM and remove all bos from the vm bo list - */ -void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) -{ - struct radeon_bo_va *bo_va, *tmp; - int r; - - mutex_lock(&rdev->vm_manager.lock); - mutex_lock(&vm->mutex); - radeon_vm_free_pt(rdev, vm); - mutex_unlock(&rdev->vm_manager.lock); - - if (!list_empty(&vm->va)) { - dev_err(rdev->dev, "still active bo inside vm\n"); - } - list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { - list_del_init(&bo_va->vm_list); - r = radeon_bo_reserve(bo_va->bo, false); - if (!r) { - list_del_init(&bo_va->bo_list); - radeon_bo_unreserve(bo_va->bo); - kfree(bo_va); - } - } - radeon_fence_unref(&vm->fence); - radeon_fence_unref(&vm->last_flush); - radeon_fence_unref(&vm->last_id_use); - mutex_unlock(&vm->mutex); -} diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index b96c819024b..d09650c1d72 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -344,18 +344,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); r = radeon_bo_wait(robj, &cur_placement, true); - switch (cur_placement) { - case TTM_PL_VRAM: - args->domain = RADEON_GEM_DOMAIN_VRAM; - break; - case TTM_PL_TT: - args->domain = RADEON_GEM_DOMAIN_GTT; - break; - case TTM_PL_SYSTEM: - args->domain = RADEON_GEM_DOMAIN_CPU; - default: - break; - } + args->domain = radeon_mem_type_to_domain(cur_placement); drm_gem_object_unreference_unlocked(gobj); r = radeon_gem_handle_lockup(rdev, r); return r; @@ -533,6 +522,42 @@ out: return r; } +int radeon_gem_op_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_op *args = data; + struct drm_gem_object *gobj; + struct radeon_bo *robj; + int r; + + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) { + return -ENOENT; + } + robj = gem_to_radeon_bo(gobj); + r = radeon_bo_reserve(robj, false); + if (unlikely(r)) + goto out; + + switch (args->op) { + case RADEON_GEM_OP_GET_INITIAL_DOMAIN: + args->value = robj->initial_domain; + break; + case RADEON_GEM_OP_SET_INITIAL_DOMAIN: + robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | + RADEON_GEM_DOMAIN_GTT | + RADEON_GEM_DOMAIN_CPU); + break; + default: + r = -EINVAL; + } + + radeon_bo_unreserve(robj); +out: + drm_gem_object_unreference_unlocked(gobj); + return r; +} + int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index e24ca6ab96d..add62200840 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -64,8 +64,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux) radeon_router_select_ddc_port(radeon_connector); if (use_aux) { - struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2); + ret = i2c_transfer(&radeon_connector->ddc_bus->aux.ddc, msgs, 2); } else { ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); } @@ -95,6 +94,8 @@ static int pre_xfer(struct i2c_adapter *i2c_adap) struct radeon_i2c_bus_rec *rec = &i2c->rec; uint32_t temp; + mutex_lock(&i2c->mutex); + /* RV410 appears to have a bug where the hw i2c in reset * holds the i2c port in a bad state - switch hw i2c away before * doing DDC - do this for all r200s/r300s/r400s for safety sake @@ -171,6 +172,8 @@ static void 
post_xfer(struct i2c_adapter *i2c_adap) temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask; WREG32(rec->mask_data_reg, temp); temp = RREG32(rec->mask_data_reg); + + mutex_unlock(&i2c->mutex); } static int get_clock(void *i2c_priv) @@ -814,6 +817,8 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap, struct radeon_i2c_bus_rec *rec = &i2c->rec; int ret = 0; + mutex_lock(&i2c->mutex); + switch (rdev->family) { case CHIP_R100: case CHIP_RV100: @@ -880,6 +885,8 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap, break; } + mutex_unlock(&i2c->mutex); + return ret; } @@ -920,6 +927,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, i2c->adapter.dev.parent = &dev->pdev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); + mutex_init(&i2c->mutex); if (rec->mm_i2c || (rec->hw_capable && radeon_hw_i2c && @@ -950,16 +958,16 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, /* set the radeon bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "Radeon i2c bit bus %s", name); - i2c->adapter.algo_data = &i2c->algo.bit; - i2c->algo.bit.pre_xfer = pre_xfer; - i2c->algo.bit.post_xfer = post_xfer; - i2c->algo.bit.setsda = set_data; - i2c->algo.bit.setscl = set_clock; - i2c->algo.bit.getsda = get_data; - i2c->algo.bit.getscl = get_clock; - i2c->algo.bit.udelay = 10; - i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */ - i2c->algo.bit.data = i2c; + i2c->adapter.algo_data = &i2c->bit; + i2c->bit.pre_xfer = pre_xfer; + i2c->bit.post_xfer = post_xfer; + i2c->bit.setsda = set_data; + i2c->bit.setscl = set_clock; + i2c->bit.getsda = get_data; + i2c->bit.getscl = get_clock; + i2c->bit.udelay = 10; + i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */ + i2c->bit.data = i2c; ret = i2c_bit_add_bus(&i2c->adapter); if (ret) { DRM_ERROR("Failed to register bit i2c %s\n", name); @@ -974,46 +982,13 @@ out_free: } -struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, - struct radeon_i2c_bus_rec *rec, - const char *name) -{ - struct radeon_i2c_chan *i2c; - int ret; - - i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); - if (i2c == NULL) - return NULL; - - i2c->rec = *rec; - i2c->adapter.owner = THIS_MODULE; - i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = &dev->pdev->dev; - i2c->dev = dev; - snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), - "Radeon aux bus %s", name); - i2c_set_adapdata(&i2c->adapter, i2c); - i2c->adapter.algo_data = &i2c->algo.dp; - i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; - i2c->algo.dp.address = 0; - ret = i2c_dp_aux_add_bus(&i2c->adapter); - if (ret) { - DRM_INFO("Failed to register i2c %s\n", name); - goto out_free; - } - - return i2c; -out_free: - kfree(i2c); - return NULL; - -} - void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) return; i2c_del_adapter(&i2c->adapter); + if (i2c->has_aux) + drm_dp_aux_unregister(&i2c->aux); kfree(i2c); } diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c index bdb0f93e73b..0b98ea13457 100644 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c @@ -399,7 +399,7 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) + if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; if (fn != NULL) diff --git 
a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 089c9ffb0aa..16807afab36 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -287,7 +287,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); rdev->irq.installed = true; - r = drm_irq_install(rdev->ddev); + r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); if (r) { rdev->irq.installed = false; flush_work(&rdev->hotplug_work); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 114d1672d61..d25ae6acfd5 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -33,6 +33,13 @@ #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <linux/pm_runtime.h> + +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_has_atpx(void); +#else +static inline bool radeon_has_atpx(void) { return false; } +#endif + /** * radeon_driver_unload_kms - Main unload function for KMS. * @@ -100,6 +107,11 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) flags |= RADEON_IS_PCI; } + if ((radeon_runtime_pm != 0) && + radeon_has_atpx() && + ((flags & RADEON_IS_IGP) == 0)) + flags |= RADEON_IS_PX; + /* radeon_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must @@ -130,7 +142,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - if (radeon_runtime_pm != 0) { + if (radeon_is_px(dev)) { pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); @@ -433,6 +445,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file case RADEON_CS_RING_UVD: *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; break; + case RADEON_CS_RING_VCE: + *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; + break; default: return -EINVAL; } @@ -477,6 +492,43 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file else *value = rdev->pm.default_sclk * 10; break; + case RADEON_INFO_VCE_FW_VERSION: + *value = rdev->vce.fw_version; + break; + case RADEON_INFO_VCE_FB_VERSION: + *value = rdev->vce.fb_version; + break; + case RADEON_INFO_NUM_BYTES_MOVED: + value = (uint32_t*)&value64; + value_size = sizeof(uint64_t); + value64 = atomic64_read(&rdev->num_bytes_moved); + break; + case RADEON_INFO_VRAM_USAGE: + value = (uint32_t*)&value64; + value_size = sizeof(uint64_t); + value64 = atomic64_read(&rdev->vram_usage); + break; + case RADEON_INFO_GTT_USAGE: + value = (uint32_t*)&value64; + value_size = sizeof(uint64_t); + value64 = atomic64_read(&rdev->gtt_usage); + break; + case RADEON_INFO_ACTIVE_CU_COUNT: + if (rdev->family >= CHIP_BONAIRE) + *value = rdev->config.cik.active_cus; + else if (rdev->family >= CHIP_TAHITI) + *value = rdev->config.si.active_cus; + else if (rdev->family >= CHIP_CAYMAN) + *value = rdev->config.cayman.active_simds; + else if (rdev->family >= CHIP_CEDAR) + *value = rdev->config.evergreen.active_simds; + else if (rdev->family >= CHIP_RV770) + *value = rdev->config.rv770.active_simds; + else if (rdev->family >= CHIP_R600) + *value = rdev->config.r600.active_simds; + else + *value = 1; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; }
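The new RADEON_INFO requests above return 64-bit values through the usual pointer-in-value convention of struct drm_radeon_info. A userspace query, for example of the new bytes-moved counter, would look roughly like this (a sketch assuming the libdrm headers; error handling omitted):

	uint64_t bytes_moved = 0;
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_NUM_BYTES_MOVED;
	info.value = (uintptr_t)&bytes_moved; /* kernel writes back through this pointer */
	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));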
@@ -527,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpus have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { struct radeon_fpriv *fpriv; - struct radeon_bo_va *bo_va; + struct radeon_vm *vm; int r; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); @@ -535,21 +587,37 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - radeon_vm_init(rdev, &fpriv->vm); - - /* map the ib pool buffer read only into - * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); if (r) { - radeon_vm_fini(rdev, &fpriv->vm); kfree(fpriv); return r; } + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) { + radeon_vm_fini(rdev, vm); + kfree(fpriv); + return r; + } + + /* map the ib pool buffer read only into + * virtual address space */ + vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, + RADEON_VA_IB_OFFSET, + RADEON_VM_PAGE_READABLE | + RADEON_VM_PAGE_SNOOPED); + + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (r) { + radeon_vm_fini(rdev, vm); + kfree(fpriv); + return r; + } + } file_priv->driver_priv = fpriv; } @@ -574,19 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev, /* new gpus have virtual address space support */ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { struct radeon_fpriv *fpriv = file_priv->driver_priv; - struct radeon_bo_va *bo_va; + struct radeon_vm *vm = &fpriv->vm; int r; - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + if (vm->ib_bo_va) + radeon_vm_bo_rmv(rdev, vm->ib_bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } } - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } @@ -610,6 +678,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev, if (rdev->cmask_filp == file_priv) rdev->cmask_filp = NULL; radeon_uvd_free_handles(rdev, file_priv); + radeon_vce_free_handles(rdev, file_priv); } /* @@ -804,5 +873,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), }; -int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); +int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 0b158f98d28..cafb1ccf2ec 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -385,7 +385,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, DRM_DEBUG_KMS("\n"); /* no fb bound */ - if (!atomic && !crtc->fb) { + if (!atomic && !crtc->primary->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } @@ -395,8 +395,8 @@ int
radeon_crtc_do_set_base(struct drm_crtc *crtc, target_fb = fb; } else { - radeon_fb = to_radeon_framebuffer(crtc->fb); - target_fb = crtc->fb; + radeon_fb = to_radeon_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; } switch (target_fb->bits_per_pixel) { @@ -444,7 +444,7 @@ retry: * We don't shutdown the display controller because new buffer * will end up in same spot. */ - if (!atomic && fb && fb != crtc->fb) { + if (!atomic && fb && fb != crtc->primary->fb) { struct radeon_bo *old_rbo; unsigned long nsize, osize; @@ -555,7 +555,7 @@ retry: WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); - if (!atomic && fb && fb != crtc->fb) { + if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); @@ -599,7 +599,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod } } - switch (crtc->fb->bits_per_pixel) { + switch (crtc->primary->fb->bits_per_pixel) { case 8: format = 2; break; @@ -1087,12 +1087,12 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) static void radeon_crtc_disable(struct drm_crtc *crtc) { radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - if (crtc->fb) { + if (crtc->primary->fb) { int r; struct radeon_framebuffer *radeon_fb; struct radeon_bo *rbo; - radeon_fb = to_radeon_framebuffer(crtc->fb); + radeon_fb = to_radeon_framebuffer(crtc->primary->fb); rbo = gem_to_radeon_bo(radeon_fb->obj); r = radeon_bo_reserve(rbo, false); if (unlikely(r)) diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 402dbe32c23..0592ddb0904 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -46,6 +46,10 @@ struct radeon_device; #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) +#define RADEON_MAX_HPD_PINS 7 +#define RADEON_MAX_CRTCS 6 +#define RADEON_MAX_AFMT_BLOCKS 7 + enum radeon_rmx_type { RMX_OFF, RMX_FULL, @@ -187,11 +191,11 @@ struct radeon_pll { struct radeon_i2c_chan { struct i2c_adapter adapter; struct drm_device *dev; - union { - struct i2c_algo_bit_data bit; - struct i2c_algo_dp_aux_data dp; - } algo; + struct i2c_algo_bit_data bit; struct radeon_i2c_bus_rec rec; + struct drm_dp_aux aux; + bool has_aux; + struct mutex mutex; }; /* mostly for macs, but really any system without connector tables */ @@ -233,8 +237,8 @@ struct radeon_mode_info { struct card_info *atom_card_info; enum radeon_connector_table connector_table; bool mode_config_initialized; - struct radeon_crtc *crtcs[6]; - struct radeon_afmt *afmt[7]; + struct radeon_crtc *crtcs[RADEON_MAX_CRTCS]; + struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS]; /* DVI-I properties */ struct drm_property *coherent_mode_property; /* DAC enable load detect */ @@ -302,6 +306,12 @@ struct radeon_atom_ss { uint16_t amount; }; +enum radeon_flip_status { + RADEON_FLIP_NONE, + RADEON_FLIP_PENDING, + RADEON_FLIP_SUBMITTED +}; + struct radeon_crtc { struct drm_crtc base; int crtc_id; @@ -325,8 +335,9 @@ struct radeon_crtc { struct drm_display_mode native_mode; int pll_id; /* page flipping */ - struct radeon_unpin_work *unpin_work; - int deferred_flip_completion; + struct workqueue_struct *flip_queue; + struct radeon_flip_work *flip_work; + enum radeon_flip_status flip_status; /* pll sharing */ struct radeon_atom_ss ss; bool ss_enabled; @@ 
-439,7 +450,6 @@ struct radeon_encoder { struct radeon_connector_atom_dig { uint32_t igp_lane_info; /* displayport */ - struct radeon_i2c_chan *dp_i2c_bus; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 dp_sink_type; int dp_clock; @@ -507,6 +517,7 @@ struct radeon_connector { struct radeon_i2c_chan *router_bus; enum radeon_connector_audio audio; enum radeon_connector_dither dither; + int pixelclock_for_modeset; }; struct radeon_framebuffer { @@ -690,6 +701,9 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct drm_connector *connector); +extern void radeon_dp_set_rx_power_state(struct drm_connector *connector, + u8 power_state); +extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector); extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); extern void radeon_atom_encoder_init(struct radeon_device *rdev); extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev); @@ -698,8 +712,6 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, uint8_t lane_set); extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); -extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, - u8 write_byte, u8 *read_byte); void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); extern void radeon_i2c_init(struct radeon_device *rdev); @@ -711,9 +723,6 @@ extern void radeon_i2c_add(struct radeon_device *rdev, const char *name); extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev, struct radeon_i2c_bus_rec *i2c_bus); -extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, - struct radeon_i2c_bus_rec *rec, - const char *name); extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_bus_rec *rec, const char *name); @@ -910,6 +919,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) void radeon_fb_output_poll_changed(struct radeon_device *rdev); +void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id); void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 08595cf90b0..6c717b257d6 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -56,11 +56,36 @@ static void radeon_bo_clear_va(struct radeon_bo *bo) } } +static void radeon_update_memory_usage(struct radeon_bo *bo, + unsigned mem_type, int sign) +{ + struct radeon_device *rdev = bo->rdev; + u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; + + switch (mem_type) { + case TTM_PL_TT: + if (sign > 0) + atomic64_add(size, &rdev->gtt_usage); + else + atomic64_sub(size, &rdev->gtt_usage); + break; + case TTM_PL_VRAM: + if (sign > 0) + atomic64_add(size, &rdev->vram_usage); + else + atomic64_sub(size, &rdev->vram_usage); + break; + } +} + static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct radeon_bo *bo; bo = container_of(tbo, struct radeon_bo, tbo); + + radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); + mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); @@ 
-79,7 +104,7 @@ bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) { - u32 c = 0; + u32 c = 0, i; rbo->placement.fpfn = 0; rbo->placement.lpfn = 0; @@ -106,6 +131,17 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; rbo->placement.num_placement = c; rbo->placement.num_busy_placement = c; + + /* + * Use two-ended allocation depending on the buffer size to + * improve fragmentation quality. + * 512 KB was measured as the optimal number. + */ + if (rbo->tbo.mem.size > 512 * 1024) { + for (i = 0; i < c; i++) { + rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN; + } + } } int radeon_bo_create(struct radeon_device *rdev, @@ -120,7 +156,6 @@ int radeon_bo_create(struct radeon_device *rdev, size = ALIGN(size, PAGE_SIZE); - rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; if (kernel) { type = ttm_bo_type_kernel; } else if (sg) { @@ -145,6 +180,9 @@ int radeon_bo_create(struct radeon_device *rdev, bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); + bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | + RADEON_GEM_DOMAIN_GTT | + RADEON_GEM_DOMAIN_CPU); radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocations are uninterruptible */ down_read(&rdev->pm.mclk_lock); @@ -338,42 +376,109 @@ void radeon_bo_fini(struct radeon_device *rdev) arch_phys_wc_del(rdev->mc.vram_mtrr); } -void radeon_bo_list_add_object(struct radeon_bo_list *lobj, - struct list_head *head) +/* Returns how many bytes TTM can move per IB. + */ +static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) { - if (lobj->written) { - list_add(&lobj->tv.head, head); - } else { - list_add_tail(&lobj->tv.head, head); - } + u64 real_vram_size = rdev->mc.real_vram_size; + u64 vram_usage = atomic64_read(&rdev->vram_usage); + + /* This function is based on the current VRAM usage. + * + * - If all of VRAM is free, allow relocating the number of bytes that + * is equal to 1/4 of the size of VRAM for this IB. + * + * - If more than one half of VRAM is occupied, only allow relocating + * 1 MB of data for this IB. + * + * - From 0 to one half of used VRAM, the threshold decreases + * linearly. + * __________________ + * 1/4 of -|\ | + * VRAM | \ | + * | \ | + * | \ | + * | \ | + * | \ | + * | \ | + * | \________|1 MB + * |----------------| + * VRAM 0 % 100 % + * used used + * + * Note: It's a threshold, not a limit. The threshold must be crossed + * for buffer relocations to stop, so any buffer of an arbitrary size + * can be moved as long as the threshold isn't crossed before + * the relocation takes place. We don't want to disable buffer + * relocations completely. + * + * The idea is that buffers should be placed in VRAM at creation time + * and TTM should only do a minimum number of relocations during + * command submission. In practice, you need to submit at least + * a dozen IBs to move all buffers to VRAM if they are in GTT. + * + * Also, things can get pretty crazy under memory pressure and actual + * VRAM usage can change a lot, so playing safe even at 50% does + * consistently increase performance. + */ + + u64 half_vram = real_vram_size >> 1; + u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; + u64 bytes_moved_threshold = half_free_vram >> 1; + return max(bytes_moved_threshold, 1024*1024ull); }
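Sanity-checking the curve in the comment with illustrative numbers (not taken from the patch): on a card with 1 GiB of VRAM, half_vram is 512 MiB. With VRAM empty, half_free_vram is 512 MiB and the threshold is 256 MiB, exactly 1/4 of VRAM. With 256 MiB resident it drops to 128 MiB, and once usage reaches 512 MiB half_free_vram is 0, leaving only the 1 MiB floor from the max() clamp. That matches both endpoints and the linear slope of the diagram above.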
-int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, +int radeon_bo_list_validate(struct radeon_device *rdev, + struct ww_acquire_ctx *ticket, struct list_head *head, int ring) { - struct radeon_bo_list *lobj; + struct radeon_cs_reloc *lobj; struct radeon_bo *bo; - u32 domain; int r; + u64 bytes_moved = 0, initial_bytes_moved; + u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); r = ttm_eu_reserve_buffers(ticket, head); if (unlikely(r != 0)) { return r; } + list_for_each_entry(lobj, head, tv.head) { - bo = lobj->bo; + bo = lobj->robj; if (!bo->pin_count) { - domain = lobj->domain; - + u32 domain = lobj->prefered_domains; + u32 current_domain = + radeon_mem_type_to_domain(bo->tbo.mem.mem_type); + + /* Check if this buffer will be moved and don't move it + * if we have moved too many buffers for this IB already. + * + * Note that this allows moving at least one buffer of + * any size, because it doesn't take the current "bo" + * into account. We don't want to disallow buffer moves + * completely. + */ + if ((lobj->allowed_domains & current_domain) != 0 && + (domain & current_domain) == 0 && /* will be moved */ + bytes_moved > bytes_moved_threshold) { + /* don't move it */ + domain = current_domain; + } + retry: radeon_ttm_placement_from_domain(bo, domain); if (ring == R600_RING_TYPE_UVD_INDEX) radeon_uvd_force_into_uvd_segment(bo); - r = ttm_bo_validate(&bo->tbo, &bo->placement, - true, false); + + initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + bytes_moved += atomic64_read(&rdev->num_bytes_moved) - + initial_bytes_moved; + if (unlikely(r)) { - if (r != -ERESTARTSYS && domain != lobj->alt_domain) { - domain = lobj->alt_domain; + if (r != -ERESTARTSYS && + domain != lobj->allowed_domains) { + domain = lobj->allowed_domains; goto retry; } ttm_eu_backoff_reservation(ticket, head); @@ -564,14 +669,23 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, } void radeon_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *new_mem) { struct radeon_bo *rbo; + if (!radeon_ttm_bo_is_radeon_bo(bo)) return; + rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 1); radeon_vm_bo_invalidate(rbo->rdev, rbo); + + /* update statistics */ + if (!new_mem) + return; + + radeon_update_memory_usage(rbo, bo->mem.mem_type, -1); + radeon_update_memory_usage(rbo, new_mem->mem_type, 1); } int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) @@ -586,22 +700,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; - if (bo->mem.mem_type == TTM_PL_VRAM) { - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) > rdev->mc.visible_vram_size) { - /* hurrah the memory is not visible !
*/ - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); - if (unlikely(r != 0)) - return r; - offset = bo->mem.start << PAGE_SHIFT; - /* this should not happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; - } + if (bo->mem.mem_type != TTM_PL_VRAM) + return 0; + + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; + if ((offset + size) <= rdev->mc.visible_vram_size) + return 0; + + /* hurrah the memory is not visible ! */ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, false); + if (unlikely(r == -ENOMEM)) { + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + return ttm_bo_validate(bo, &rbo->placement, false, false); + } else if (unlikely(r != 0)) { + return r; } + + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + return 0; } @@ -609,7 +731,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) { int r; - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) return r; spin_lock(&bo->tbo.bdev->fence_lock); diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 209b1115026..5a873f31a17 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -65,7 +65,7 @@ static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr) { int r; - r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(bo->rdev->dev, "%p reserve failed\n", bo); @@ -138,9 +138,8 @@ extern int radeon_bo_evict_vram(struct radeon_device *rdev); extern void radeon_bo_force_delete(struct radeon_device *rdev); extern int radeon_bo_init(struct radeon_device *rdev); extern void radeon_bo_fini(struct radeon_device *rdev); -extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, - struct list_head *head); -extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, +extern int radeon_bo_list_validate(struct radeon_device *rdev, + struct ww_acquire_ctx *ticket, struct list_head *head, int ring); extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma); @@ -151,7 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop); extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); + struct ttm_mem_reg *new_mem); extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); @@ -181,7 +180,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, extern int radeon_sa_bo_new(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, struct radeon_sa_bo **sa_bo, - unsigned size, unsigned align, bool block); + unsigned size, unsigned align); extern void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, struct radeon_fence *fence); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 
8e8153e471c..e447e390d09 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) rdev->pm.dpm.ac_power = true; else rdev->pm.dpm.ac_power = false; - if (rdev->asic->dpm.enable_bapm) - radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + if (rdev->family == CHIP_ARUBA) { + if (rdev->asic->dpm.enable_bapm) + radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); + } mutex_unlock(&rdev->pm.mutex); } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { @@ -260,7 +262,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) if (!ring->ready) { continue; } - r = radeon_fence_wait_empty_locked(rdev, i); + r = radeon_fence_wait_empty(rdev, i); if (r) { /* needs a GPU reset, don't reset here */ mutex_unlock(&rdev->ring_lock); @@ -361,6 +363,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set profile when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) @@ -409,6 +416,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set method when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + count = -EINVAL; + goto fail; + } + /* we don't support the legacy modes with dpm */ if (rdev->pm.pm_method == PM_METHOD_DPM) { count = -EINVAL; @@ -446,6 +460,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); @@ -459,6 +477,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set dpm state when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -485,6 +508,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == RADEON_DPM_FORCED_LEVEL_LOW) ?
"low" : "high"); @@ -500,6 +527,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, enum radeon_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = RADEON_DPM_FORCED_LEVEL_LOW; @@ -538,8 +570,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); + struct drm_device *ddev = rdev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (rdev->asic->pm.get_temperature) temp = radeon_get_temperature(rdev); else @@ -603,7 +641,6 @@ static const struct attribute_group *hwmon_groups[] = { static int radeon_hwmon_init(struct radeon_device *rdev) { int err = 0; - struct device *hwmon_dev; switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_RV6XX: @@ -616,11 +653,11 @@ static int radeon_hwmon_init(struct radeon_device *rdev) case THERMAL_TYPE_KV: if (rdev->asic->pm.get_temperature == NULL) return err; - hwmon_dev = hwmon_device_register_with_groups(rdev->dev, - "radeon", rdev, - hwmon_groups); - if (IS_ERR(hwmon_dev)) { - err = PTR_ERR(hwmon_dev); + rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev, + "radeon", rdev, + hwmon_groups); + if (IS_ERR(rdev->pm.int_hwmon_dev)) { + err = PTR_ERR(rdev->pm.int_hwmon_dev); dev_err(rdev->dev, "Unable to register hwmon device: %d\n", err); } @@ -632,6 +669,12 @@ static int radeon_hwmon_init(struct radeon_device *rdev) return err; } +static void radeon_hwmon_fini(struct radeon_device *rdev) +{ + if (rdev->pm.int_hwmon_dev) + hwmon_device_unregister(rdev->pm.int_hwmon_dev); +} + static void radeon_dpm_thermal_work_handler(struct work_struct *work) { struct radeon_device *rdev = @@ -826,6 +869,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) /* no need to reprogram if nothing changed unless we are on BTC+ */ if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { + /* vce just modifies an existing state so force a change */ + if (ps->vce_active != rdev->pm.dpm.vce_active) + goto force; if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { /* for pre-BTC and APUs if the num crtcs changed but state is the same, * all we need to do is update the display configuration. 
@@ -862,16 +908,21 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) } } +force: if (radeon_dpm == 1) { printk("switching from power state:\n"); radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); printk("switching to power state:\n"); radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); } + mutex_lock(&rdev->ddev->struct_mutex); down_write(&rdev->pm.mclk_lock); mutex_lock(&rdev->ring_lock); + /* update whether vce is active */ + ps->vce_active = rdev->pm.dpm.vce_active; + ret = radeon_dpm_pre_set_power_state(rdev); if (ret) goto done; @@ -888,7 +939,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) for (i = 0; i < RADEON_NUM_RINGS; i++) { struct radeon_ring *ring = &rdev->ring[i]; if (ring->ready) - radeon_fence_wait_empty_locked(rdev, i); + radeon_fence_wait_empty(rdev, i); } /* program the new power state */ @@ -960,6 +1011,23 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) } } +void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable) +{ + if (enable) { + mutex_lock(&rdev->pm.mutex); + rdev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL; + mutex_unlock(&rdev->pm.mutex); + } else { + mutex_lock(&rdev->pm.mutex); + rdev->pm.dpm.vce_active = false; + mutex_unlock(&rdev->pm.mutex); + } + + radeon_pm_compute_clocks(rdev); +} + static void radeon_pm_suspend_old(struct radeon_device *rdev) { mutex_lock(&rdev->pm.mutex); @@ -1041,7 +1109,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) if (ret) goto dpm_resume_fail; rdev->pm.dpm_enabled = true; - radeon_pm_compute_clocks(rdev); return; dpm_resume_fail: @@ -1235,6 +1302,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_RV670: case CHIP_RS780: case CHIP_RS880: + case CHIP_RV770: case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: @@ -1251,7 +1319,6 @@ int radeon_pm_init(struct radeon_device *rdev) else rdev->pm.pm_method = PM_METHOD_PROFILE; break; - case CHIP_RV770: case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: @@ -1273,6 +1340,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: + case CHIP_MULLINS: /* DPM requires the RLC, RV770+ dGPU requires SMC */ if (!rdev->rlc_fw) rdev->pm.pm_method = PM_METHOD_PROFILE; @@ -1331,6 +1399,8 @@ static void radeon_pm_fini_old(struct radeon_device *rdev) device_remove_file(rdev->dev, &dev_attr_power_method); } + radeon_hwmon_fini(rdev); + if (rdev->pm.power_state) kfree(rdev->pm.power_state); } @@ -1350,6 +1420,8 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev) } radeon_dpm_fini(rdev); + radeon_hwmon_fini(rdev); + if (rdev->pm.power_state) kfree(rdev->pm.power_state); } @@ -1375,12 +1447,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) rdev->pm.active_crtcs = 0; rdev->pm.active_crtc_count = 0; - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (radeon_crtc->enabled) { - rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); - rdev->pm.active_crtc_count++; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); + rdev->pm.active_crtc_count++; + } } } @@ -1447,12 +1521,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device 
*rdev) /* update active crtc counts */ rdev->pm.dpm.new_active_crtcs = 0; rdev->pm.dpm.new_active_crtc_count = 0; - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (crtc->enabled) { - rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); - rdev->pm.dpm.new_active_crtc_count++; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled) { + rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); + rdev->pm.dpm.new_active_crtc_count++; + } } } @@ -1578,8 +1654,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_device *ddev = rdev->ddev; - if (rdev->pm.dpm_enabled) { + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (rdev->pm.dpm_enabled) { mutex_lock(&rdev->pm.mutex); if (rdev->asic->dpm.debugfs_print_current_performance_level) radeon_dpm_debugfs_print_current_performance_level(rdev, m); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 1b783f0e6d3..f8050f5429e 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -63,7 +63,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, { int r; - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256); if (r) { dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); return r; @@ -139,12 +139,19 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, } /* 64 dwords should be enough for fence too */ - r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); if (r) { dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; } + /* grab a vm id if necessary */ + if (ib->vm) { + struct radeon_fence *vm_id_fence; + vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); + radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); + } + /* sync with other rings */ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); if (r) { @@ -153,11 +160,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, return r; } - /* if we can't remember our last VM flush then flush now! 
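Every sysfs and debugfs handler touched above gains the same guard: on a PowerXpress (PX) laptop the dGPU may be powered off via vga_switcheroo, and touching its registers then would fault, so the handlers bail out (or report "off") unless the switch state is DRM_SWITCH_POWER_ON. A hypothetical helper condensing the repeated check, kernel-style; it is not part of the patch:

    /* sketch only: the patch open-codes this test in each handler */
    static bool radeon_pm_card_accessible(struct radeon_device *rdev,
                                          struct drm_device *ddev)
    {
            if (!(rdev->flags & RADEON_IS_PX))
                    return true;    /* non-PX boards are always powered */
            return ddev->switch_power_state == DRM_SWITCH_POWER_ON;
    }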
*/ - /* XXX figure out why we have to flush for every IB */ - if (ib->vm /*&& !ib->vm->last_flush*/) { - radeon_ring_vm_flush(rdev, ib->ring, ib->vm); - } + if (ib->vm) + radeon_vm_flush(rdev, ib->vm, ib->ring); + if (const_ib) { radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); @@ -172,10 +177,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, if (const_ib) { const_ib->fence = radeon_fence_ref(ib->fence); } - /* we just flushed the VM, remember that */ - if (ib->vm && !ib->vm->last_flush) { - ib->vm->last_flush = radeon_fence_ref(ib->fence); - } + + if (ib->vm) + radeon_vm_fence(rdev, ib->vm, ib->fence); + radeon_ring_unlock_commit(rdev, ring); return 0; } @@ -257,6 +262,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev) r = radeon_ib_test(rdev, i, ring); if (r) { ring->ready = false; + rdev->needs_reset = false; if (i == RADEON_RING_TYPE_GFX_INDEX) { /* oh, oh, that's really bad */ @@ -342,13 +348,17 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, */ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) { - ring->rptr = radeon_ring_get_rptr(rdev, ring); + uint32_t rptr = radeon_ring_get_rptr(rdev, ring); + /* This works because ring_size is a power of 2 */ - ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4)); + ring->ring_free_dw = rptr + (ring->ring_size / 4); ring->ring_free_dw -= ring->wptr; ring->ring_free_dw &= ring->ptr_mask; if (!ring->ring_free_dw) { + /* this is an empty ring */ ring->ring_free_dw = ring->ring_size / 4; + /* update lockup info to avoid false positive */ + radeon_ring_lockup_update(rdev, ring); } } @@ -372,19 +382,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi /* Align requested size with padding so unlock_commit can * pad safely */ radeon_ring_free_size(rdev, ring); - if (ring->ring_free_dw == (ring->ring_size / 4)) { - /* This is an empty ring update lockup info to avoid - * false positive. - */ - radeon_ring_lockup_update(ring); - } ndw = (ndw + ring->align_mask) & ~ring->align_mask; while (ndw > (ring->ring_free_dw - 1)) { radeon_ring_free_size(rdev, ring); if (ndw < ring->ring_free_dw) { break; } - r = radeon_fence_wait_next_locked(rdev, ring->idx); + r = radeon_fence_wait_next(rdev, ring->idx); if (r) return r; } @@ -478,39 +482,17 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin } /** - * radeon_ring_force_activity - add some nop packets to the ring - * - * @rdev: radeon_device pointer - * @ring: radeon_ring structure holding ring information - * - * Add some nop packets to the ring to force activity (all asics). - * Used for lockup detection to see if the rptr is advancing. - */ -void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring) -{ - int r; - - radeon_ring_free_size(rdev, ring); - if (ring->rptr == ring->wptr) { - r = radeon_ring_alloc(rdev, ring, 1); - if (!r) { - radeon_ring_write(ring, ring->nop); - radeon_ring_commit(rdev, ring); - } - } -} - -/** * radeon_ring_lockup_update - update lockup variables * * @ring: radeon_ring structure holding ring information * * Update the last rptr value and timestamp (all asics). 
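The rewritten radeon_ring_free_size also folds the empty-ring case in: when the free space computes to zero the ring is empty, not full, so it is reset to the whole ring and the lockup tracking is refreshed to avoid a false positive. The dword arithmetic only works because ring_size is a power of two; a runnable check of the math, with sizes assumed for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* ring_size is in bytes; rptr/wptr and the result are in dwords */
    static uint32_t ring_free_dw(uint32_t rptr, uint32_t wptr,
                                 uint32_t ring_size_bytes)
    {
        uint32_t ptr_mask = (ring_size_bytes / 4) - 1;
        uint32_t free = (rptr + ring_size_bytes / 4 - wptr) & ptr_mask;
        /* zero means completely empty, not completely full */
        return free ? free : ring_size_bytes / 4;
    }

    int main(void)
    {
        printf("%u\n", ring_free_dw(10, 10, 4096)); /* empty: 1024 dw */
        printf("%u\n", ring_free_dw(10, 14, 4096)); /* 1020 dw left  */
        return 0;
    }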
*/ -void radeon_ring_lockup_update(struct radeon_ring *ring) +void radeon_ring_lockup_update(struct radeon_device *rdev, + struct radeon_ring *ring) { - ring->last_rptr = ring->rptr; - ring->last_activity = jiffies; + atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring)); + atomic64_set(&ring->last_activity, jiffies_64); } /** @@ -518,40 +500,23 @@ void radeon_ring_lockup_update(struct radeon_ring *ring) * @rdev: radeon device structure * @ring: radeon_ring structure holding ring information * - * We don't need to initialize the lockup tracking information as we will either - * have CP rptr to a different value of jiffies wrap around which will force - * initialization of the lockup tracking informations. - * - * A possible false positivie is if we get call after while and last_cp_rptr == - * the current CP rptr, even if it's unlikely it might happen. To avoid this - * if the elapsed time since last call is bigger than 2 second than we return - * false and update the tracking information. Due to this the caller must call - * radeon_ring_test_lockup several time in less than 2sec for lockup to be reported - * the fencing code should be cautious about that. - * - * Caller should write to the ring to force CP to do something so we don't get - * false positive when CP is just gived nothing to do. - * - **/ + */ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) { - unsigned long cjiffies, elapsed; + uint32_t rptr = radeon_ring_get_rptr(rdev, ring); + uint64_t last = atomic64_read(&ring->last_activity); + uint64_t elapsed; - cjiffies = jiffies; - if (!time_after(cjiffies, ring->last_activity)) { - /* likely a wrap around */ - radeon_ring_lockup_update(ring); + if (rptr != atomic_read(&ring->last_rptr)) { + /* ring is still working, no lockup */ + radeon_ring_lockup_update(rdev, ring); return false; } - ring->rptr = radeon_ring_get_rptr(rdev, ring); - if (ring->rptr != ring->last_rptr) { - /* CP is still working no lockup */ - radeon_ring_lockup_update(ring); - return false; - } - elapsed = jiffies_to_msecs(cjiffies - ring->last_activity); + + elapsed = jiffies_to_msecs(jiffies_64 - last); if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) { - dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); + dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n", + ring->idx, elapsed); return true; } /* give a chance to the GPU ... 
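Lockup detection is now stateless and lock-free: each call compares the hardware rptr against an atomic copy of the last one, refreshing the timestamp when it moved, and only reports a stall once the ring has been motionless for more than radeon_lockup_timeout milliseconds. Using jiffies_64 also removes the 32-bit jiffies wrap-around case the deleted comment worried about. The same two-step test in plain C, with the atomics replaced by ordinary fields:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring_state { uint32_t last_rptr; uint64_t last_activity_ms; };

    static bool test_lockup(struct ring_state *rs, uint32_t rptr,
                            uint64_t now_ms, uint64_t timeout_ms)
    {
        if (rptr != rs->last_rptr) {
            /* ring is still working, no lockup */
            rs->last_rptr = rptr;
            rs->last_activity_ms = now_ms;
            return false;
        }
        /* timeout of zero disables the check, as in the driver */
        return timeout_ms && (now_ms - rs->last_activity_ms) >= timeout_ms;
    }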
*/ @@ -709,7 +674,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig if (radeon_debugfs_ring_init(rdev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); } - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return 0; } @@ -780,8 +745,6 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); - seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", - ring->rptr, ring->rptr); seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); seq_printf(m, "last semaphore wait addr : 0x%016llx\n", @@ -814,6 +777,8 @@ static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX; static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX; +static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX; +static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX; static struct drm_info_list radeon_debugfs_ring_info_list[] = { {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index}, @@ -822,6 +787,8 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = { {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index}, {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index}, {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index}, + {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index}, + {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index}, }; static int radeon_debugfs_sa_info(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index c0625805cdd..adcf3e2f07d 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -312,7 +312,7 @@ static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager, int radeon_sa_bo_new(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, struct radeon_sa_bo **sa_bo, - unsigned size, unsigned align, bool block) + unsigned size, unsigned align) { struct radeon_fence *fences[RADEON_NUM_RINGS]; unsigned tries[RADEON_NUM_RINGS]; @@ -353,14 +353,11 @@ int radeon_sa_bo_new(struct radeon_device *rdev, r = radeon_fence_wait_any(rdev, fences, false); spin_lock(&sa_manager->wq.lock); /* if we have nothing to wait for block */ - if (r == -ENOENT && block) { + if (r == -ENOENT) { r = wait_event_interruptible_locked( sa_manager->wq, radeon_sa_event(sa_manager, size, align) ); - - } else if (r == -ENOENT) { - r = -ENOMEM; } } while (!r); diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 2b42aa1914f..dbd6bcde92d 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -34,14 +34,15 @@ int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { + uint32_t *cpu_addr; int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); if (*semaphore == NULL) { return -ENOMEM; } - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, - &(*semaphore)->sa_bo, 8, 8, true); + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, + 8 * RADEON_NUM_SYNCS, 8); if (r) { kfree(*semaphore); *semaphore = NULL; @@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev, } (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = 
radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); - *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; + + cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); + for (i = 0; i < RADEON_NUM_SYNCS; ++i) + cpu_addr[i] = 0; for (i = 0; i < RADEON_NUM_RINGS; ++i) (*semaphore)->sync_to[i] = NULL; @@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int ring) { + unsigned count = 0; int i, r; for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -140,6 +145,14 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, return -EINVAL; } + if (++count > RADEON_NUM_SYNCS) { + /* not enough room, wait manually */ + r = radeon_fence_wait(fence, false); + if (r) + return r; + continue; + } + /* allocate enough space for sync command */ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); if (r) { @@ -150,7 +163,9 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { /* signaling wasn't successful wait manually */ radeon_ring_undo(&rdev->ring[i]); - radeon_fence_wait_locked(fence); + r = radeon_fence_wait(fence, false); + if (r) + return r; continue; } @@ -158,12 +173,16 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { /* waiting wasn't successful wait manually */ radeon_ring_undo(&rdev->ring[i]); - radeon_fence_wait_locked(fence); + r = radeon_fence_wait(fence, false); + if (r) + return r; continue; } radeon_ring_commit(rdev, &rdev->ring[i]); radeon_fence_note_sync(fence, ring); + + semaphore->gpu_addr += 8; } return 0; diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 956ab7f14e1..23bb64fd775 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -3054,7 +3054,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) value = 0; else - value = drm_dev_to_irq(dev); + value = dev->pdev->irq; break; case RADEON_PARAM_GART_BASE: value = dev_priv->gart_vm_start; @@ -3258,4 +3258,4 @@ struct drm_ioctl_desc radeon_ioctls[] = { DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) }; -int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); +int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls); diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 12e8099a082..3a13e0d1055 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -257,20 +257,36 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_fence **fence) { + uint32_t handle = ring->idx ^ 0xdeafbeef; int r; if (ring->idx == R600_RING_TYPE_UVD_INDEX) { - r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); + r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL); if (r) { DRM_ERROR("Failed to get dummy create msg\n"); return r; } - r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence); + r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence); if (r) { DRM_ERROR("Failed to get dummy destroy msg\n"); return r; } + + } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX || + ring->idx == TN_RING_TYPE_VCE2_INDEX) { + r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL); + if (r) { + DRM_ERROR("Failed to get dummy create msg\n"); + return r; + } + + r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence); + if (r) { + 
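A semaphore object is now a small array: the allocation above reserves 8 * RADEON_NUM_SYNCS bytes, every successfully emitted signal/wait pair consumes one 8-byte slot (gpu_addr += 8), and once the count exceeds the slots the code degrades to a plain CPU-side radeon_fence_wait(). The shape of that loop with the hardware emission stripped out; the RADEON_NUM_SYNCS value is an assumption here:

    #include <stdbool.h>
    #include <stdint.h>

    #define RADEON_NUM_SYNCS 4          /* assumed value for illustration */

    static void sync_rings_shape(const bool needs_sync[], int nrings,
                                 uint64_t *gpu_addr)
    {
        unsigned count = 0;

        for (int i = 0; i < nrings; ++i) {
            if (!needs_sync[i])
                continue;
            if (++count > RADEON_NUM_SYNCS) {
                /* out of slots: block with radeon_fence_wait() instead */
                continue;
            }
            /* emit a signal on ring i and a wait on the target ring ... */
            *gpu_addr += 8;             /* advance to the next sync slot */
        }
    }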
DRM_ERROR("Failed to get dummy destroy msg\n"); + return r; + } + } else { r = radeon_ring_lock(rdev, ring, 64); if (r) { @@ -486,6 +502,16 @@ out_cleanup: printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); } +static bool radeon_test_sync_possible(struct radeon_ring *ringA, + struct radeon_ring *ringB) +{ + if (ringA->idx == TN_RING_TYPE_VCE2_INDEX && + ringB->idx == TN_RING_TYPE_VCE1_INDEX) + return false; + + return true; +} + void radeon_test_syncing(struct radeon_device *rdev) { int i, j, k; @@ -500,6 +526,9 @@ void radeon_test_syncing(struct radeon_device *rdev) if (!ringB->ready) continue; + if (!radeon_test_sync_possible(ringA, ringB)) + continue; + DRM_INFO("Testing syncing between rings %d and %d...\n", i, j); radeon_test_ring_sync(rdev, ringA, ringB); @@ -511,6 +540,12 @@ void radeon_test_syncing(struct radeon_device *rdev) if (!ringC->ready) continue; + if (!radeon_test_sync_possible(ringA, ringC)) + continue; + + if (!radeon_test_sync_possible(ringB, ringC)) + continue; + DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k); radeon_test_ring_sync2(rdev, ringA, ringB, ringC); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 77f5b0c3edb..c8a8a5144ec 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -406,8 +406,14 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + if (r) { + return r; + } } - return r; + + /* update statistics */ + atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); + return 0; } static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) @@ -701,7 +707,9 @@ int radeon_ttm_init(struct radeon_device *rdev) /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, rdev->mman.bo_global_ref.ref.object, - &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, + &radeon_bo_driver, + rdev->ddev->anon_inode->i_mapping, + DRM_FILE_PAGE_OFFSET, rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -714,6 +722,9 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + /* Change the size here instead of the init above so only lpfn is affected */ + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->stollen_vga_memory); @@ -739,7 +750,6 @@ int radeon_ttm_init(struct radeon_device *rdev) } DRM_INFO("radeon: %uM of GTT memory ready.\n", (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); - rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; r = radeon_ttm_debugfs_init(rdev); if (r) { @@ -935,7 +945,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, while (size) { loff_t p = *pos / PAGE_SIZE; unsigned off = *pos & ~PAGE_MASK; - ssize_t cur_size = min(size, PAGE_SIZE - off); + size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); struct page *page; void *ptr; diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index a77cd274dfc..4e7c3269b18 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h @@ -52,14 +52,20 @@ #define BONAIRE_RLC_UCODE_SIZE 2048 #define KB_RLC_UCODE_SIZE 2560 #define KV_RLC_UCODE_SIZE 2560 +#define ML_RLC_UCODE_SIZE 2560 /* MC */ #define BTC_MC_UCODE_SIZE 6024 #define 
CAYMAN_MC_UCODE_SIZE 6037 #define SI_MC_UCODE_SIZE 7769 +#define TAHITI_MC_UCODE_SIZE 7808 +#define PITCAIRN_MC_UCODE_SIZE 7775 +#define VERDE_MC_UCODE_SIZE 7875 #define OLAND_MC_UCODE_SIZE 7863 -#define CIK_MC_UCODE_SIZE 7866 +#define BONAIRE_MC_UCODE_SIZE 7866 +#define BONAIRE_MC2_UCODE_SIZE 7948 #define HAWAII_MC_UCODE_SIZE 7933 +#define HAWAII_MC2_UCODE_SIZE 8091 /* SDMA */ #define CIK_SDMA_UCODE_SIZE 1050 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 6781fee1eaa..a4ad270e826 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev) case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: + case CHIP_MULLINS: fw_name = FIRMWARE_BONAIRE; break; @@ -171,6 +172,8 @@ void radeon_uvd_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->uvd.vcpu_bo); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); + release_firmware(rdev->uvd_fw); } @@ -453,7 +456,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, } reloc = p->relocs_ptr[(idx / 4)]; - start = reloc->lobj.gpu_offset; + start = reloc->gpu_offset; end = start + radeon_bo_size(reloc->robj); start += offset; @@ -463,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, cmd = radeon_get_ib_value(p, p->idx) >> 1; if (cmd < 0x4) { + if (end <= start) { + DRM_ERROR("invalid reloc offset %X!\n", offset); + return -EINVAL; + } if ((end - start) < buf_sizes[cmd]) { DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c new file mode 100644 index 00000000000..aa21c31a846 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -0,0 +1,771 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
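The new end <= start test in radeon_uvd_cs_reloc matters because start already includes the user-supplied offset: a large offset can push start past the end of the BO (or wrap), and the following (end - start) size check would then run on an underflowed value. The check isolated as a runnable predicate, names invented:

    #include <stdbool.h>
    #include <stdint.h>

    static bool reloc_in_bounds(uint64_t bo_start, uint64_t bo_size,
                                uint64_t offset, uint64_t min_size)
    {
        uint64_t start = bo_start + offset;  /* user offset already added */
        uint64_t end = bo_start + bo_size;

        if (end <= start)                    /* offset past the BO, or wrapped */
            return false;
        return (end - start) >= min_size;    /* safe now: no underflow */
    }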
+ * + * Authors: Christian König <christian.koenig@amd.com> + */ + +#include <linux/firmware.h> +#include <linux/module.h> +#include <drm/drmP.h> +#include <drm/drm.h> + +#include "radeon.h" +#include "radeon_asic.h" +#include "sid.h" + +/* 1 second timeout */ +#define VCE_IDLE_TIMEOUT_MS 1000 + +/* Firmware Names */ +#define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin" + +MODULE_FIRMWARE(FIRMWARE_BONAIRE); + +static void radeon_vce_idle_work_handler(struct work_struct *work); + +/** + * radeon_vce_init - allocate memory, load vce firmware + * + * @rdev: radeon_device pointer + * + * First step to get VCE online, allocate memory and load the firmware + */ +int radeon_vce_init(struct radeon_device *rdev) +{ + static const char *fw_version = "[ATI LIB=VCEFW,"; + static const char *fb_version = "[ATI LIB=VCEFWSTATS,"; + unsigned long size; + const char *fw_name, *c; + uint8_t start, mid, end; + int i, r; + + INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler); + + switch (rdev->family) { + case CHIP_BONAIRE: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_HAWAII: + case CHIP_MULLINS: + fw_name = FIRMWARE_BONAIRE; + break; + + default: + return -EINVAL; + } + + r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev); + if (r) { + dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n", + fw_name); + return r; + } + + /* search for firmware version */ + + size = rdev->vce_fw->size - strlen(fw_version) - 9; + c = rdev->vce_fw->data; + for (;size > 0; --size, ++c) + if (strncmp(c, fw_version, strlen(fw_version)) == 0) + break; + + if (size == 0) + return -EINVAL; + + c += strlen(fw_version); + if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3) + return -EINVAL; + + /* search for feedback version */ + + size = rdev->vce_fw->size - strlen(fb_version) - 3; + c = rdev->vce_fw->data; + for (;size > 0; --size, ++c) + if (strncmp(c, fb_version, strlen(fb_version)) == 0) + break; + + if (size == 0) + return -EINVAL; + + c += strlen(fb_version); + if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) + return -EINVAL; + + DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n", + start, mid, end, rdev->vce.fb_version); + + rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); + + /* we can only work with this fw version for now */ + if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) + return -EINVAL; + + /* allocate firmware, stack and heap BO */ + + size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + + RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo); + if (r) { + dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); + return r; + } + + r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); + if (r) { + radeon_bo_unref(&rdev->vce.vcpu_bo); + dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); + return r; + } + + r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, + &rdev->vce.gpu_addr); + radeon_bo_unreserve(rdev->vce.vcpu_bo); + if (r) { + radeon_bo_unref(&rdev->vce.vcpu_bo); + dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); + return r; + } + + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + atomic_set(&rdev->vce.handles[i], 0); + rdev->vce.filp[i] = NULL; + } + + return 0; +} + +/** + * radeon_vce_fini - free memory + * + * @rdev: radeon_device pointer + * + * Last step on VCE teardown, free firmware memory + */ +void radeon_vce_fini(struct radeon_device *rdev) +{ + if (rdev->vce.vcpu_bo == NULL) + return; + + 
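radeon_vce_init locates the version inside the firmware blob by scanning for a marker string and then sscanf-ing the dotted version that follows it. A user-space reproduction of that scan, using the same marker; the sample blob is made up, and %2hhu is used here where the kernel code uses %2hhd:

    #include <stdio.h>
    #include <string.h>

    static int parse_vce_version(const char *data, unsigned long size,
                                 unsigned char *start, unsigned char *mid,
                                 unsigned char *end)
    {
        static const char fw_version[] = "[ATI LIB=VCEFW,";
        unsigned long n = size - strlen(fw_version) - 9;
        const char *c = data;

        /* linear scan for the marker, leaving room for the version text */
        for (; n > 0; --n, ++c)
            if (strncmp(c, fw_version, strlen(fw_version)) == 0)
                break;
        if (n == 0)
            return -1;

        c += strlen(fw_version);
        return sscanf(c, "%2hhu.%2hhu.%2hhu]", start, mid, end) == 3 ? 0 : -1;
    }

    int main(void)
    {
        const char blob[] = "xxxx[ATI LIB=VCEFW,40.2.2]yyyyyyyyyy";
        unsigned char s, m, e;

        if (!parse_vce_version(blob, sizeof(blob) - 1, &s, &m, &e))
            printf("%u.%u.%u\n", s, m, e);   /* prints 40.2.2 */
        return 0;
    }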
radeon_bo_unref(&rdev->vce.vcpu_bo); + + release_firmware(rdev->vce_fw); +} + +/** + * radeon_vce_suspend - unpin VCE fw memory + * + * @rdev: radeon_device pointer + * + */ +int radeon_vce_suspend(struct radeon_device *rdev) +{ + int i; + + if (rdev->vce.vcpu_bo == NULL) + return 0; + + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) + if (atomic_read(&rdev->vce.handles[i])) + break; + + if (i == RADEON_MAX_VCE_HANDLES) + return 0; + + /* TODO: suspending running encoding sessions isn't supported */ + return -EINVAL; +} + +/** + * radeon_vce_resume - pin VCE fw memory + * + * @rdev: radeon_device pointer + * + */ +int radeon_vce_resume(struct radeon_device *rdev) +{ + void *cpu_addr; + int r; + + if (rdev->vce.vcpu_bo == NULL) + return -EINVAL; + + r = radeon_bo_reserve(rdev->vce.vcpu_bo, false); + if (r) { + dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); + return r; + } + + r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr); + if (r) { + radeon_bo_unreserve(rdev->vce.vcpu_bo); + dev_err(rdev->dev, "(%d) VCE map failed\n", r); + return r; + } + + memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); + + radeon_bo_kunmap(rdev->vce.vcpu_bo); + + radeon_bo_unreserve(rdev->vce.vcpu_bo); + + return 0; +} + +/** + * radeon_vce_idle_work_handler - power off VCE + * + * @work: pointer to work structure + * + * power of VCE when it's not used any more + */ +static void radeon_vce_idle_work_handler(struct work_struct *work) +{ + struct radeon_device *rdev = + container_of(work, struct radeon_device, vce.idle_work.work); + + if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) && + (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + radeon_dpm_enable_vce(rdev, false); + } else { + radeon_set_vce_clocks(rdev, 0, 0); + } + } else { + schedule_delayed_work(&rdev->vce.idle_work, + msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); + } +} + +/** + * radeon_vce_note_usage - power up VCE + * + * @rdev: radeon_device pointer + * + * Make sure VCE is powerd up when we want to use it + */ +void radeon_vce_note_usage(struct radeon_device *rdev) +{ + bool streams_changed = false; + bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work); + set_clocks &= schedule_delayed_work(&rdev->vce.idle_work, + msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); + + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + /* XXX figure out if the streams changed */ + streams_changed = false; + } + + if (set_clocks || streams_changed) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + radeon_dpm_enable_vce(rdev, true); + } else { + radeon_set_vce_clocks(rdev, 53300, 40000); + } + } +} + +/** + * radeon_vce_free_handles - free still open VCE handles + * + * @rdev: radeon_device pointer + * @filp: drm file pointer + * + * Close all VCE handles still open by this file pointer + */ +void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp) +{ + int i, r; + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + uint32_t handle = atomic_read(&rdev->vce.handles[i]); + if (!handle || rdev->vce.filp[i] != filp) + continue; + + radeon_vce_note_usage(rdev); + + r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX, + handle, NULL); + if (r) + DRM_ERROR("Error destroying VCE handle (%d)!\n", r); + + rdev->vce.filp[i] = NULL; + atomic_set(&rdev->vce.handles[i], 0); + } +} + +/** + * radeon_vce_get_create_msg - generate a VCE create msg + * + * @rdev: radeon_device pointer + * 
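The power gating around VCE leans on two kernel return values: cancel_delayed_work_sync() returns true only if the idle work was still pending, and schedule_delayed_work() returns true only if it actually queued the work. radeon_vce_note_usage combines them so the clocks are raised exactly once; the same lines with the reasoning spelled out as comments, a sketch rather than a verbatim quote:

    bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
    /* true here means the work was NOT pending: the idle handler may
     * already have powered VCE down, so the clocks must come back up */
    set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
                                        msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
    /* if a concurrent caller re-queued the work first, that caller
     * also did the power up, so this one can skip it */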
@ring: ring we should submit the msg to + * @handle: VCE session handle to use + * @fence: optional fence to return + * + * Open up a stream for HW test + */ +int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, + uint32_t handle, struct radeon_fence **fence) +{ + const unsigned ib_size_dw = 1024; + struct radeon_ib ib; + uint64_t dummy; + int i, r; + + r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4); + if (r) { + DRM_ERROR("radeon: failed to get ib (%d).\n", r); + return r; + } + + dummy = ib.gpu_addr + 1024; + + /* stitch together an VCE create msg */ + ib.length_dw = 0; + ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ + ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ + ib.ptr[ib.length_dw++] = handle; + + ib.ptr[ib.length_dw++] = 0x00000030; /* len */ + ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ + ib.ptr[ib.length_dw++] = 0x00000000; + ib.ptr[ib.length_dw++] = 0x00000042; + ib.ptr[ib.length_dw++] = 0x0000000a; + ib.ptr[ib.length_dw++] = 0x00000001; + ib.ptr[ib.length_dw++] = 0x00000080; + ib.ptr[ib.length_dw++] = 0x00000060; + ib.ptr[ib.length_dw++] = 0x00000100; + ib.ptr[ib.length_dw++] = 0x00000100; + ib.ptr[ib.length_dw++] = 0x0000000c; + ib.ptr[ib.length_dw++] = 0x00000000; + + ib.ptr[ib.length_dw++] = 0x00000014; /* len */ + ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ + ib.ptr[ib.length_dw++] = upper_32_bits(dummy); + ib.ptr[ib.length_dw++] = dummy; + ib.ptr[ib.length_dw++] = 0x00000001; + + for (i = ib.length_dw; i < ib_size_dw; ++i) + ib.ptr[i] = 0x0; + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) { + DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); + } + + if (fence) + *fence = radeon_fence_ref(ib.fence); + + radeon_ib_free(rdev, &ib); + + return r; +} + +/** + * radeon_vce_get_destroy_msg - generate a VCE destroy msg + * + * @rdev: radeon_device pointer + * @ring: ring we should submit the msg to + * @handle: VCE session handle to use + * @fence: optional fence to return + * + * Close up a stream for HW test or if userspace failed to do so + */ +int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, + uint32_t handle, struct radeon_fence **fence) +{ + const unsigned ib_size_dw = 1024; + struct radeon_ib ib; + uint64_t dummy; + int i, r; + + r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4); + if (r) { + DRM_ERROR("radeon: failed to get ib (%d).\n", r); + return r; + } + + dummy = ib.gpu_addr + 1024; + + /* stitch together an VCE destroy msg */ + ib.length_dw = 0; + ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ + ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ + ib.ptr[ib.length_dw++] = handle; + + ib.ptr[ib.length_dw++] = 0x00000014; /* len */ + ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ + ib.ptr[ib.length_dw++] = upper_32_bits(dummy); + ib.ptr[ib.length_dw++] = dummy; + ib.ptr[ib.length_dw++] = 0x00000001; + + ib.ptr[ib.length_dw++] = 0x00000008; /* len */ + ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ + + for (i = ib.length_dw; i < ib_size_dw; ++i) + ib.ptr[i] = 0x0; + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) { + DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); + } + + if (fence) + *fence = radeon_fence_ref(ib.fence); + + radeon_ib_free(rdev, &ib); + + return r; +} + +/** + * radeon_vce_cs_reloc - command submission relocation + * + * @p: parser context + * @lo: address of lower dword + * @hi: address of higher dword + * @size: size of checker for relocation buffer + * + * Patch relocation inside command stream with real buffer address 
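The reloc helper documented above rewrites two command-stream dwords in place: the lo dword initially carries an offset into the BO, the hi dword a reloc index, and once the BO is resolved both are replaced with the split 64-bit GPU address. A runnable demonstration of that patching step, with a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    static void patch_reloc(uint32_t *ib, int lo, int hi,
                            uint64_t bo_gpu_addr)
    {
        uint64_t start = bo_gpu_addr + ib[lo];  /* offset within the BO */

        ib[lo] = start & 0xFFFFFFFF;            /* low 32 bits  */
        ib[hi] = start >> 32;                   /* high 32 bits */
    }

    int main(void)
    {
        uint32_t ib[2] = { 0x1000, 0 };         /* offset, reloc index */

        patch_reloc(ib, 0, 1, 0x1234500000ull);
        printf("%08x %08x\n", ib[1], ib[0]);    /* 00000012 34501000 */
        return 0;
    }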
+ */
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+                        unsigned size)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_reloc *reloc;
+	uint64_t start, end, offset;
+	unsigned idx;
+
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	offset = radeon_get_ib_value(p, lo);
+	idx = radeon_get_ib_value(p, hi);
+
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+
+	reloc = p->relocs_ptr[(idx / 4)];
+	start = reloc->gpu_offset;
+	end = start + radeon_bo_size(reloc->robj);
+	start += offset;
+
+	p->ib.ptr[lo] = start & 0xFFFFFFFF;
+	p->ib.ptr[hi] = start >> 32;
+
+	if (end <= start) {
+		DRM_ERROR("invalid reloc offset %llX!\n", offset);
+		return -EINVAL;
+	}
+	if ((end - start) < size) {
+		DRM_ERROR("buffer too small (%d / %d)!\n",
+			  (unsigned)(end - start), size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+	unsigned i;
+
+	/* validate the handle */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+			return i;
+	}
+
+	/* handle not found, try to alloc a new one */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+			p->rdev->vce.filp[i] = p->filp;
+			p->rdev->vce.img_size[i] = 0;
+			return i;
+		}
+	}
+
+	DRM_ERROR("No more free VCE handles!\n");
+	return -EINVAL;
+}
+
+/**
+ * radeon_vce_cs_parse - parse and validate the command stream
+ *
+ * @p: parser context
+ *
+ */
+int radeon_vce_cs_parse(struct radeon_cs_parser *p)
+{
+	int session_idx = -1;
+	bool destroyed = false;
+	uint32_t tmp, handle = 0;
+	uint32_t *size = &tmp;
+	int i, r;
+
+	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+		uint32_t len = radeon_get_ib_value(p, p->idx);
+		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
+
+		if ((len < 8) || (len & 3)) {
+			DRM_ERROR("invalid VCE command length (%d)!\n", len);
+			return -EINVAL;
+		}
+
+		if (destroyed) {
+			DRM_ERROR("No other command allowed after destroy!\n");
+			return -EINVAL;
+		}
+
+		switch (cmd) {
+		case 0x00000001: // session
+			handle = radeon_get_ib_value(p, p->idx + 2);
+			session_idx = radeon_vce_validate_handle(p, handle);
+			if (session_idx < 0)
+				return session_idx;
+			size = &p->rdev->vce.img_size[session_idx];
+			break;
+
+		case 0x00000002: // task info
+			break;
+
+		case 0x01000001: // create
+			*size = radeon_get_ib_value(p, p->idx + 8) *
+				radeon_get_ib_value(p, p->idx + 10) *
+				8 * 3 / 2;
+			break;
+
+		case 0x04000001: // config extension
+		case 0x04000002: // pic control
+		case 0x04000005: // rate control
+		case 0x04000007: // motion estimation
+		case 0x04000008: // rdo
+			break;
+
+		case 0x03000001: // encode
+			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+						*size);
+			if (r)
+				return r;
+
+			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+						*size / 3);
+			if (r)
+				return r;
+			break;
+
+		case 0x02000001: // destroy
+			destroyed = true;
+			break;
+
+		case 0x05000001: // context buffer
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						*size * 2);
+			if (r)
+				return r;
+			break;
+
+		case 0x05000004: // video bitstream buffer
+			tmp = radeon_get_ib_value(p, p->idx + 4);
+			r = 
radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + tmp); + if (r) + return r; + break; + + case 0x05000005: // feedback buffer + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + 4096); + if (r) + return r; + break; + + default: + DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); + return -EINVAL; + } + + if (session_idx == -1) { + DRM_ERROR("no session command at start of IB\n"); + return -EINVAL; + } + + p->idx += len / 4; + } + + if (destroyed) { + /* IB contains a destroy msg, free the handle */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) + atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); + } + + return 0; +} + +/** + * radeon_vce_semaphore_emit - emit a semaphore command + * + * @rdev: radeon_device pointer + * @ring: engine to use + * @semaphore: address of semaphore + * @emit_wait: true=emit wait, false=emit signal + * + */ +bool radeon_vce_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, VCE_CMD_SEMAPHORE); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); + if (!emit_wait) + radeon_ring_write(ring, VCE_CMD_END); + + return true; +} + +/** + * radeon_vce_ib_execute - execute indirect buffer + * + * @rdev: radeon_device pointer + * @ib: the IB to execute + * + */ +void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + radeon_ring_write(ring, VCE_CMD_IB); + radeon_ring_write(ring, ib->gpu_addr); + radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); + radeon_ring_write(ring, ib->length_dw); +} + +/** + * radeon_vce_fence_emit - add a fence command to the ring + * + * @rdev: radeon_device pointer + * @fence: the fence + * + */ +void radeon_vce_fence_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; + + radeon_ring_write(ring, VCE_CMD_FENCE); + radeon_ring_write(ring, addr); + radeon_ring_write(ring, upper_32_bits(addr)); + radeon_ring_write(ring, fence->seq); + radeon_ring_write(ring, VCE_CMD_TRAP); + radeon_ring_write(ring, VCE_CMD_END); +} + +/** + * radeon_vce_ring_test - test if VCE ring is working + * + * @rdev: radeon_device pointer + * @ring: the engine to test on + * + */ +int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + uint32_t rptr = vce_v1_0_get_rptr(rdev, ring); + unsigned i; + int r; + + r = radeon_ring_lock(rdev, ring, 16); + if (r) { + DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n", + ring->idx, r); + return r; + } + radeon_ring_write(ring, VCE_CMD_END); + radeon_ring_unlock_commit(rdev, ring); + + for (i = 0; i < rdev->usec_timeout; i++) { + if (vce_v1_0_get_rptr(rdev, ring) != rptr) + break; + DRM_UDELAY(1); + } + + if (i < rdev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", + ring->idx, i); + } else { + DRM_ERROR("radeon: ring %d test failed\n", + ring->idx); + r = -ETIMEDOUT; + } + + return r; +} + +/** + * radeon_vce_ib_test - test if VCE IBs are working + * + * @rdev: radeon_device pointer + * @ring: the engine to test on + * + */ +int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + struct radeon_fence *fence = NULL; + int r; + + r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL); + if (r) { 
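radeon_vce_cs_parse walks (len, cmd, payload...) packets: len is in bytes, must be at least 8 and dword aligned, a session command must come first, and nothing may follow a destroy. A stand-alone validator of just those framing rules, with an explicit end-of-IB bound check added for the demo:

    #include <stdbool.h>
    #include <stdint.h>

    static bool vce_stream_ok(const uint32_t *ib, unsigned len_dw)
    {
        unsigned idx = 0;
        bool have_session = false, destroyed = false;

        while (idx < len_dw) {
            uint32_t len = ib[idx];            /* packet length in bytes */

            if (len < 8 || (len & 3) || idx + len / 4 > len_dw)
                return false;                  /* bad framing */
            if (destroyed)
                return false;                  /* nothing after destroy */
            switch (ib[idx + 1]) {
            case 0x00000001: have_session = true; break;  /* session */
            case 0x02000001: destroyed = true; break;     /* destroy */
            }
            if (!have_session)
                return false;                  /* session cmd must come first */
            idx += len / 4;
        }
        return true;
    }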
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r); + goto error; + } + + r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence); + if (r) { + DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); + goto error; + } + + r = radeon_fence_wait(fence, false); + if (r) { + DRM_ERROR("radeon: fence wait failed (%d).\n", r); + } else { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + } +error: + radeon_fence_unref(&fence); + return r; +} diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c new file mode 100644 index 00000000000..725d3669014 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -0,0 +1,1089 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#include <drm/drmP.h> +#include <drm/radeon_drm.h> +#include "radeon.h" +#include "radeon_trace.h" + +/* + * GPUVM + * GPUVM is similar to the legacy gart on older asics, however + * rather than there being a single global gart table + * for the entire GPU, there are multiple VM page tables active + * at any given time. The VM page tables can contain a mix + * vram pages and system memory pages and system memory pages + * can be mapped as snooped (cached system pages) or unsnooped + * (uncached system pages). + * Each VM has an ID associated with it and there is a page table + * associated with each VMID. When execting a command buffer, + * the kernel tells the the ring what VMID to use for that command + * buffer. VMIDs are allocated dynamically as commands are submitted. + * The userspace drivers maintain their own address space and the kernel + * sets up their pages tables accordingly when they submit their + * command buffers and a VMID is assigned. + * Cayman/Trinity support up to 8 active VMs at any given time; + * SI supports 16. + */ + +/** + * radeon_vm_num_pde - return the number of page directory entries + * + * @rdev: radeon_device pointer + * + * Calculate the number of page directory entries (cayman+). + */ +static unsigned radeon_vm_num_pdes(struct radeon_device *rdev) +{ + return rdev->vm_manager.max_pfn >> radeon_vm_block_size; +} + +/** + * radeon_vm_directory_size - returns the size of the page directory in bytes + * + * @rdev: radeon_device pointer + * + * Calculate the size of the page directory in bytes (cayman+). 
+ */ +static unsigned radeon_vm_directory_size(struct radeon_device *rdev) +{ + return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); +} + +/** + * radeon_vm_manager_init - init the vm manager + * + * @rdev: radeon_device pointer + * + * Init the vm manager (cayman+). + * Returns 0 for success, error for failure. + */ +int radeon_vm_manager_init(struct radeon_device *rdev) +{ + int r; + + if (!rdev->vm_manager.enabled) { + r = radeon_asic_vm_init(rdev); + if (r) + return r; + + rdev->vm_manager.enabled = true; + } + return 0; +} + +/** + * radeon_vm_manager_fini - tear down the vm manager + * + * @rdev: radeon_device pointer + * + * Tear down the VM manager (cayman+). + */ +void radeon_vm_manager_fini(struct radeon_device *rdev) +{ + int i; + + if (!rdev->vm_manager.enabled) + return; + + for (i = 0; i < RADEON_NUM_VM; ++i) + radeon_fence_unref(&rdev->vm_manager.active[i]); + radeon_asic_vm_fini(rdev); + rdev->vm_manager.enabled = false; +} + +/** + * radeon_vm_get_bos - add the vm BOs to a validation list + * + * @vm: vm providing the BOs + * @head: head of validation list + * + * Add the page directory to the list of BOs to + * validate for command submission (cayman+). + */ +struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, + struct radeon_vm *vm, + struct list_head *head) +{ + struct radeon_cs_reloc *list; + unsigned i, idx; + + list = kmalloc_array(vm->max_pde_used + 2, + sizeof(struct radeon_cs_reloc), GFP_KERNEL); + if (!list) + return NULL; + + /* add the vm page table to the list */ + list[0].gobj = NULL; + list[0].robj = vm->page_directory; + list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; + list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; + list[0].tv.bo = &vm->page_directory->tbo; + list[0].tiling_flags = 0; + list[0].handle = 0; + list_add(&list[0].tv.head, head); + + for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { + if (!vm->page_tables[i].bo) + continue; + + list[idx].gobj = NULL; + list[idx].robj = vm->page_tables[i].bo; + list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; + list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; + list[idx].tv.bo = &list[idx].robj->tbo; + list[idx].tiling_flags = 0; + list[idx].handle = 0; + list_add(&list[idx++].tv.head, head); + } + + return list; +} + +/** + * radeon_vm_grab_id - allocate the next free VMID + * + * @rdev: radeon_device pointer + * @vm: vm to allocate id for + * @ring: ring we want to submit job to + * + * Allocate an id for the vm (cayman+). + * Returns the fence we need to sync to (if any). + * + * Global and local mutex must be locked! + */ +struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, + struct radeon_vm *vm, int ring) +{ + struct radeon_fence *best[RADEON_NUM_RINGS] = {}; + unsigned choices[2] = {}; + unsigned i; + + /* check if the id is still valid */ + if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) + return NULL; + + /* we definately need to flush */ + radeon_fence_unref(&vm->last_flush); + + /* skip over VMID 0, since it is the system VM */ + for (i = 1; i < rdev->vm_manager.nvm; ++i) { + struct radeon_fence *fence = rdev->vm_manager.active[i]; + + if (fence == NULL) { + /* found a free one */ + vm->id = i; + trace_radeon_vm_grab_id(vm->id, ring); + return NULL; + } + + if (radeon_fence_is_earlier(fence, best[fence->ring])) { + best[fence->ring] = fence; + choices[fence->ring == ring ? 
0 : 1] = i; + } + } + + for (i = 0; i < 2; ++i) { + if (choices[i]) { + vm->id = choices[i]; + trace_radeon_vm_grab_id(vm->id, ring); + return rdev->vm_manager.active[choices[i]]; + } + } + + /* should never happen */ + BUG(); + return NULL; +} + +/** + * radeon_vm_flush - hardware flush the vm + * + * @rdev: radeon_device pointer + * @vm: vm we want to flush + * @ring: ring to use for flush + * + * Flush the vm (cayman+). + * + * Global and local mutex must be locked! + */ +void radeon_vm_flush(struct radeon_device *rdev, + struct radeon_vm *vm, + int ring) +{ + uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); + + /* if we can't remember our last VM flush then flush now! */ + /* XXX figure out why we have to flush all the time */ + if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) { + vm->pd_gpu_addr = pd_addr; + radeon_ring_vm_flush(rdev, ring, vm); + } +} + +/** + * radeon_vm_fence - remember fence for vm + * + * @rdev: radeon_device pointer + * @vm: vm we want to fence + * @fence: fence to remember + * + * Fence the vm (cayman+). + * Set the fence used to protect page table and id. + * + * Global and local mutex must be locked! + */ +void radeon_vm_fence(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_fence *fence) +{ + radeon_fence_unref(&vm->fence); + vm->fence = radeon_fence_ref(fence); + + radeon_fence_unref(&rdev->vm_manager.active[vm->id]); + rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); + + radeon_fence_unref(&vm->last_id_use); + vm->last_id_use = radeon_fence_ref(fence); + + /* we just flushed the VM, remember that */ + if (!vm->last_flush) + vm->last_flush = radeon_fence_ref(fence); +} + +/** + * radeon_vm_bo_find - find the bo_va for a specific vm & bo + * + * @vm: requested vm + * @bo: requested buffer object + * + * Find @bo inside the requested vm (cayman+). + * Search inside the @bos vm list for the requested vm + * Returns the found bo_va or NULL if none is found + * + * Object has to be reserved! + */ +struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, + struct radeon_bo *bo) +{ + struct radeon_bo_va *bo_va; + + list_for_each_entry(bo_va, &bo->va, bo_list) { + if (bo_va->vm == vm) { + return bo_va; + } + } + return NULL; +} + +/** + * radeon_vm_bo_add - add a bo to a specific vm + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @bo: radeon buffer object + * + * Add @bo into the requested vm (cayman+). + * Add @bo to the list of bos associated with the vm + * Returns newly added bo_va or NULL for failure + * + * Object has to be reserved! 
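VMID selection prefers a completely free id (no wait needed at all) and otherwise reuses the id whose protecting fence will signal soonest; the real code additionally tracks the best candidate per ring so it can prefer a fence emitted on the target ring. A simplified, runnable version of the selection, modelling fences as ages where a larger age signals sooner and -1 means unused:

    #include <stdio.h>

    static int grab_id(const long fence_age[], int nvm)
    {
        int best = -1;
        long oldest = -1;

        for (int i = 1; i < nvm; ++i) {      /* id 0 is the system VM */
            if (fence_age[i] < 0)
                return i;                    /* free id: no wait needed */
            if (best < 0 || fence_age[i] > oldest) {
                oldest = fence_age[i];
                best = i;                    /* earliest-signalling fence */
            }
        }
        return best;                         /* caller waits on its fence */
    }

    int main(void)
    {
        long ages[4] = { 0, 5, 12, 3 };

        printf("vmid %d\n", grab_id(ages, 4));  /* picks 2, the oldest */
        return 0;
    }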
+ */ +struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo) +{ + struct radeon_bo_va *bo_va; + + bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); + if (bo_va == NULL) { + return NULL; + } + bo_va->vm = vm; + bo_va->bo = bo; + bo_va->soffset = 0; + bo_va->eoffset = 0; + bo_va->flags = 0; + bo_va->valid = false; + bo_va->ref_count = 1; + INIT_LIST_HEAD(&bo_va->bo_list); + INIT_LIST_HEAD(&bo_va->vm_list); + INIT_LIST_HEAD(&bo_va->vm_status); + + mutex_lock(&vm->mutex); + list_add(&bo_va->vm_list, &vm->va); + list_add_tail(&bo_va->bo_list, &bo->va); + mutex_unlock(&vm->mutex); + + return bo_va; +} + +/** + * radeon_vm_clear_bo - initially clear the page dir/table + * + * @rdev: radeon_device pointer + * @bo: bo to clear + */ +static int radeon_vm_clear_bo(struct radeon_device *rdev, + struct radeon_bo *bo) +{ + struct ttm_validate_buffer tv; + struct ww_acquire_ctx ticket; + struct list_head head; + struct radeon_ib ib; + unsigned entries; + uint64_t addr; + int r; + + memset(&tv, 0, sizeof(tv)); + tv.bo = &bo->tbo; + + INIT_LIST_HEAD(&head); + list_add(&tv.head, &head); + + r = ttm_eu_reserve_buffers(&ticket, &head); + if (r) + return r; + + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + if (r) + goto error; + + addr = radeon_bo_gpu_offset(bo); + entries = radeon_bo_size(bo) / 8; + + r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, + NULL, entries * 2 + 64); + if (r) + goto error; + + ib.length_dw = 0; + + radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0); + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) + goto error; + + ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); + radeon_ib_free(rdev, &ib); + + return 0; + +error: + ttm_eu_backoff_reservation(&ticket, &head); + return r; +} + +/** + * radeon_vm_bo_set_addr - set bos virtual address inside a vm + * + * @rdev: radeon_device pointer + * @bo_va: bo_va to store the address + * @soffset: requested offset of the buffer in the VM address space + * @flags: attributes of pages (read/write/valid/etc.) + * + * Set offset of @bo_va (cayman+). + * Validate and set the offset requested within the vm address space. + * Returns 0 for success, error for failure. + * + * Object has to be reserved! 
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t soffset,
+			  uint32_t flags)
+{
+	uint64_t size = radeon_bo_size(bo_va->bo);
+	uint64_t eoffset, last_offset = 0;
+	struct radeon_vm *vm = bo_va->vm;
+	struct radeon_bo_va *tmp;
+	struct list_head *head;
+	unsigned last_pfn, pt_idx;
+	int r;
+
+	if (soffset) {
+		/* make sure object fits at this offset */
+		eoffset = soffset + size;
+		if (soffset >= eoffset) {
+			return -EINVAL;
+		}
+
+		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+		if (last_pfn > rdev->vm_manager.max_pfn) {
+			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+				last_pfn, rdev->vm_manager.max_pfn);
+			return -EINVAL;
+		}
+
+	} else {
+		eoffset = last_pfn = 0;
+	}
+
+	mutex_lock(&vm->mutex);
+	head = &vm->va;
+	last_offset = 0;
+	list_for_each_entry(tmp, &vm->va, vm_list) {
+		if (bo_va == tmp) {
+			/* skip over currently modified bo */
+			continue;
+		}
+
+		if (soffset >= last_offset && eoffset <= tmp->soffset) {
+			/* bo can be added before this one */
+			break;
+		}
+		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+			/* bo and tmp overlap, invalid offset */
+			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+			mutex_unlock(&vm->mutex);
+			return -EINVAL;
+		}
+		last_offset = tmp->eoffset;
+		head = &tmp->vm_list;
+	}
+
+	if (bo_va->soffset) {
+		/* add a clone of the bo_va to clear the old address */
+		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			return -ENOMEM;
+		}
+		tmp->soffset = bo_va->soffset;
+		tmp->eoffset = bo_va->eoffset;
+		tmp->vm = vm;
+		list_add(&tmp->vm_status, &vm->freed);
+	}
+
+	bo_va->soffset = soffset;
+	bo_va->eoffset = eoffset;
+	bo_va->flags = flags;
+	bo_va->valid = false;
+	list_move(&bo_va->vm_list, head);
+
+	soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+	eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+
+	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
+
+	if (eoffset > vm->max_pde_used)
+		vm->max_pde_used = eoffset;
+
+	radeon_bo_unreserve(bo_va->bo);
+
+	/* walk over the address space and allocate the page tables */
+	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
+		struct radeon_bo *pt;
+
+		if (vm->page_tables[pt_idx].bo)
+			continue;
+
+		/* drop mutex to allocate and clear page table */
+		mutex_unlock(&vm->mutex);
+
+		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+		if (r)
+			return r;
+
+		r = radeon_vm_clear_bo(rdev, pt);
+		if (r) {
+			radeon_bo_unref(&pt);
+			radeon_bo_reserve(bo_va->bo, false);
+			return r;
+		}
+
+		/* acquire mutex again */
+		mutex_lock(&vm->mutex);
+		if (vm->page_tables[pt_idx].bo) {
+			/* someone else allocated the pt in the meantime */
+			mutex_unlock(&vm->mutex);
+			radeon_bo_unref(&pt);
+			mutex_lock(&vm->mutex);
+			continue;
+		}
+
+		vm->page_tables[pt_idx].addr = 0;
+		vm->page_tables[pt_idx].bo = pt;
+	}
+
+	mutex_unlock(&vm->mutex);
+	return radeon_bo_reserve(bo_va->bo, false);
+}
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
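+ *
+ * Worked example, assuming 4KB CPU pages: for addr 0x12345 this
+ * returns pages_addr[0x12] | 0x345, i.e. the DMA address of GART
+ * page 0x12 plus the offset of the address within that page.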
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+	uint64_t result;
+
+	/* page table offset */
+	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+	/* in case cpu page size != gpu page size */
+	result |= addr & (~PAGE_MASK);
+
+	return result;
+}
+
+/**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+	uint32_t hw_flags = 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		hw_flags |= R600_PTE_SYSTEM;
+		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return hw_flags;
+}
+
+/**
+ * radeon_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_update_page_directory(struct radeon_device *rdev,
+				    struct radeon_vm *vm)
+{
+	struct radeon_bo *pd = vm->page_directory;
+	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
+	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0, pt_idx, ndw;
+	struct radeon_ib ib;
+	int r;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	/* assume the worst case */
+	ndw += vm->max_pde_used * 16;
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
+		uint64_t pde, pt;
+
+		if (bo == NULL)
+			continue;
+
+		pt = radeon_bo_gpu_offset(bo);
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+		vm->page_tables[pt_idx].addr = pt;
+
+		pde = pd_addr + pt_idx * 8;
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, &ib, last_pde,
+							last_pt, count, incr,
+							R600_PTE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count)
+		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
+					incr, R600_PTE_VALID);
+
+	if (ib.length_dw != 0) {
+		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
+		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+		r = radeon_ib_schedule(rdev, &ib, NULL);
+		if (r) {
+			radeon_ib_free(rdev, &ib);
+			return r;
+		}
+		radeon_fence_unref(&vm->fence);
+		vm->fence = radeon_fence_ref(ib.fence);
+		radeon_fence_unref(&vm->last_flush);
+	}
+	radeon_ib_free(rdev, &ib);
+
+	return 0;
+}
+
+/**
+ * radeon_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB for the update
+ * @pe_start: first PTE to handle
+ * @pe_end: last PTE to handle
+ * @addr: addr those PTEs should point to
+ * @flags: hw mapping flags
+ *
+ * Global and local mutex must be locked!
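+ *
+ * Worked example of the fragment alignment used below: with 8-byte
+ * PTEs, the 0x80-byte alignment on SI and newer covers 16 PTEs,
+ * i.e. 16 * 4KB = 64KB of address space, while the 0x200-byte
+ * alignment on Cayman covers 64 PTEs = 256KB.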
+ */
+static void radeon_vm_frag_ptes(struct radeon_device *rdev,
+				struct radeon_ib *ib,
+				uint64_t pe_start, uint64_t pe_end,
+				uint64_t addr, uint32_t flags)
+{
+	/**
+	 * The MC L1 TLB supports variable sized pages, based on a fragment
+	 * field in the PTE. When this field is set to a non-zero value, page
+	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+	 * flags are considered valid for all PTEs within the fragment range
+	 * and corresponding mappings are assumed to be physically contiguous.
+	 *
+	 * The L1 TLB can store a single PTE for the whole fragment,
+	 * significantly increasing the space available for translation
+	 * caching. This leads to large improvements in throughput when the
+	 * TLB is under pressure.
+	 *
+	 * The L2 TLB distributes small and large fragments into two
+	 * asymmetric partitions. The large fragment cache is significantly
+	 * larger. Thus, we try to use large fragments wherever possible.
+	 * Userspace can support this by aligning the virtual base address
+	 * and the allocation size to the fragment size.
+	 */
+
+	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
+	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
+	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+
+	uint64_t frag_start = ALIGN(pe_start, frag_align);
+	uint64_t frag_end = pe_end & ~(frag_align - 1);
+
+	unsigned count;
+
+	/* system pages are not physically contiguous */
+	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
+	    (frag_start >= frag_end)) {
+
+		count = (pe_end - pe_start) / 8;
+		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+		return;
+	}
+
+	/* handle the 4K area at the beginning */
+	if (pe_start != frag_start) {
+		count = (frag_start - pe_start) / 8;
+		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+		addr += RADEON_GPU_PAGE_SIZE * count;
+	}
+
+	/* handle the area in the middle */
+	count = (frag_end - frag_start) / 8;
+	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
+				RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+
+	/* handle the 4K area at the end */
+	if (frag_end != pe_end) {
+		addr += RADEON_GPU_PAGE_SIZE * count;
+		count = (pe_end - frag_end) / 8;
+		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to use for the update
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
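+ *
+ * Sketch of the address split performed below (addr counted in GPU
+ * pages, assuming the default VM block size of 9 bits, i.e. 512 PTEs
+ * per page table):
+ *
+ *   pt_idx = addr >> radeon_vm_block_size;  selects the page table
+ *   pte = pt_base + (addr & 511) * 8;       byte offset of the PTE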
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  struct radeon_ib *ib,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> radeon_vm_block_size;
+		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
+		unsigned nptes;
+		uint64_t pte;
+
+		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_bo_gpu_offset(pt);
+		pte += (addr & mask) * 8;
+
+		if ((last_pte + 8 * count) != pte) {
+
+			if (count) {
+				radeon_vm_frag_ptes(rdev, ib, last_pte,
+						    last_pte + 8 * count,
+						    last_dst, flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_vm_frag_ptes(rdev, ib, last_pte,
+				    last_pte + 8 * count,
+				    last_dst, flags);
+	}
+}
+
+/**
+ * radeon_vm_bo_update - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va mapping
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for the @bo_va mapping (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and the mutex must be locked!
+ */
+int radeon_vm_bo_update(struct radeon_device *rdev,
+			struct radeon_bo_va *bo_va,
+			struct ttm_mem_reg *mem)
+{
+	struct radeon_vm *vm = bo_va->vm;
+	struct radeon_ib ib;
+	unsigned nptes, ndw;
+	uint64_t addr;
+	int r;
+
+	if (!bo_va->soffset) {
+		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
+			bo_va->bo, vm);
+		return -EINVAL;
+	}
+
+	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+		return 0;
+
+	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+	if (mem) {
+		addr = mem->start << PAGE_SHIFT;
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			bo_va->flags |= RADEON_VM_PAGE_VALID;
+			bo_va->valid = true;
+		}
+		if (mem->mem_type == TTM_PL_TT) {
+			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+		} else {
+			addr += rdev->vm_manager.vram_base_offset;
+		}
+	} else {
+		addr = 0;
+		bo_va->valid = false;
+	}
+
+	trace_radeon_vm_bo_update(bo_va);
+
+	nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
+
+	/* padding, etc.
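+	 * The ndw budget computed below is a worst-case sketch: 64 dwords
+	 * of padding and flush overhead, one write header for every page
+	 * table (or every 2k dwords) the mapping crosses, and two dwords
+	 * per PTE address written.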
+	 */
+	ndw = 64;
+
+	if (radeon_vm_block_size > 11)
+		/* reserve space for one header for every 2k dwords */
+		ndw += (nptes >> 11) * 4;
+	else
+		/* reserve space for one header for
+		   every (1 << BLOCK_SIZE) entries */
+		ndw += (nptes >> radeon_vm_block_size) * 4;
+
+	/* reserve space for pte addresses */
+	ndw += nptes * 2;
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
+	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
+			      addr, radeon_vm_page_flags(bo_va->flags));
+
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		return r;
+	}
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
+	radeon_fence_unref(&vm->last_flush);
+
+	return 0;
+}
+
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int r;
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+		list_del(&bo_va->vm_status);
+		r = radeon_vm_bo_update(rdev, bo_va, NULL);
+		kfree(bo_va);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ *
+ * Object has to be reserved!
+ */
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+		      struct radeon_bo_va *bo_va)
+{
+	struct radeon_vm *vm = bo_va->vm;
+
+	list_del(&bo_va->bo_list);
+
+	mutex_lock(&vm->mutex);
+	list_del(&bo_va->vm_list);
+
+	if (bo_va->soffset) {
+		bo_va->bo = NULL;
+		list_add(&bo_va->vm_status, &vm->freed);
+	} else {
+		kfree(bo_va);
+	}
+
+	mutex_unlock(&vm->mutex);
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		bo_va->valid = false;
+	}
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
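+ *
+ * Typical lifetime sketch (hypothetical per-file-private driver code,
+ * error handling elided):
+ *
+ *   r = radeon_vm_init(rdev, &fpriv->vm);
+ *   ...
+ *   radeon_vm_fini(rdev, &fpriv->vm);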
+ */
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
+				   RADEON_VM_PTE_COUNT * 8);
+	unsigned pd_size, pd_entries, pts_size;
+	int r;
+
+	vm->id = 0;
+	vm->ib_bo_va = NULL;
+	vm->fence = NULL;
+	vm->last_flush = NULL;
+	vm->last_id_use = NULL;
+	mutex_init(&vm->mutex);
+	INIT_LIST_HEAD(&vm->va);
+	INIT_LIST_HEAD(&vm->freed);
+
+	pd_size = radeon_vm_directory_size(rdev);
+	pd_entries = radeon_vm_num_pdes(rdev);
+
+	/* allocate page table array */
+	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		return -ENOMEM;
+	}
+
+	r = radeon_bo_create(rdev, pd_size, align, true,
+			     RADEON_GEM_DOMAIN_VRAM, NULL,
+			     &vm->page_directory);
+	if (r)
+		return r;
+
+	r = radeon_vm_clear_bo(rdev, vm->page_directory);
+	if (r) {
+		radeon_bo_unref(&vm->page_directory);
+		vm->page_directory = NULL;
+		return r;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list.
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int i, r;
+
+	if (!list_empty(&vm->va)) {
+		dev_err(rdev->dev, "still active bo inside vm\n");
+	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+		list_del_init(&bo_va->vm_list);
+		r = radeon_bo_reserve(bo_va->bo, false);
+		if (!r) {
+			list_del_init(&bo_va->bo_list);
+			radeon_bo_unreserve(bo_va->bo);
+			kfree(bo_va);
+		}
+	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+		kfree(bo_va);
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_bo_unref(&vm->page_tables[i].bo);
+	kfree(vm->page_tables);
+
+	radeon_bo_unref(&vm->page_directory);
+
+	radeon_fence_unref(&vm->fence);
+	radeon_fence_unref(&vm->last_flush);
+	radeon_fence_unref(&vm->last_id_use);
+
+	mutex_destroy(&vm->mutex);
+}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f..ec0c6829c1d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369cda2..a0f96decece 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,21 +212,16 @@ void rs400_gart_fini(struct radeon_device *rdev)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)
 
-int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
 {
 	uint32_t entry;
 	u32 *gtt = rdev->gart.ptr;
 
-	if (i < 0 || i > rdev->gart.num_gpu_pages) {
-		return -EINVAL;
-	}
-
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
 		((upper_32_bits(addr) & 0xff) << 4) |
 		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
 	entry = cpu_to_le32(entry);
 	gtt[i] = entry;
-	return 0;
 }
 
 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
@@ -474,8 +469,6 @@ int rs400_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
rdev->accel_working = true; r = rs400_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index fdcde769303..d1a35cb1c91 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -109,19 +109,7 @@ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) } } -void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) -{ - /* enable the pflip int */ - radeon_irq_kms_pflip_irq_get(rdev, crtc); -} - -void rs600_post_page_flip(struct radeon_device *rdev, int crtc) -{ - /* disable the pflip int */ - radeon_irq_kms_pflip_irq_put(rdev, crtc); -} - -u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); @@ -148,9 +136,15 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) /* Unlock the lock, so double-buffering can take place inside vblank */ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); +} + +bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; /* Return current update_pending status: */ - return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; + return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & + AVIVO_D1GRPH_SURFACE_UPDATE_PENDING); } void avivo_program_fmt(struct drm_encoder *encoder) @@ -632,24 +626,16 @@ static void rs600_gart_fini(struct radeon_device *rdev) radeon_gart_table_vram_free(rdev); } -#define R600_PTE_VALID (1 << 0) -#define R600_PTE_SYSTEM (1 << 1) -#define R600_PTE_SNOOPED (1 << 2) -#define R600_PTE_READABLE (1 << 5) -#define R600_PTE_WRITEABLE (1 << 6) - -int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr) { void __iomem *ptr = (void *)rdev->gart.ptr; - if (i < 0 || i > rdev->gart.num_gpu_pages) { - return -EINVAL; - } addr = addr & 0xFFFFFFFFFFFFF000ULL; - addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; - addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE; + if (addr == rdev->dummy_page.addr) + addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; + else + addr |= R600_PTE_GART; writeq(addr, ptr + (i * 8)); - return 0; } int rs600_irq_set(struct radeon_device *rdev) @@ -787,7 +773,7 @@ int rs600_irq_process(struct radeon_device *rdev) wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); } if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { if (rdev->irq.crtc_vblank_int[1]) { @@ -796,7 +782,7 @@ int rs600_irq_process(struct radeon_device *rdev) wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); } if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { queue_hotplug = true; @@ -1048,8 +1034,6 @@ int rs600_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs600_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 35950738bd5..3462b64369b 100644 --- 
a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs690_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 8512085b0ae..02f7710de47 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c @@ -807,9 +807,6 @@ static int rs780_parse_power_table(struct radeon_device *rdev) power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.dpm.ps) return -ENOMEM; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < power_info->pplib.ucNumStates; i++) { power_state = (union pplib_power_state *) @@ -859,6 +856,10 @@ int rs780_dpm_init(struct radeon_device *rdev) return -ENOMEM; rdev->pm.dpm.priv = pi; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = rs780_parse_power_table(rdev); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 98e8138ff77..3e21e869015 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); @@ -586,8 +587,6 @@ int rv515_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rv515_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index bebf31c4d84..e7045b08571 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c @@ -1891,9 +1891,6 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.dpm.ps) return -ENOMEM; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < power_info->pplib.ucNumStates; i++) { power_state = (union pplib_power_state *) @@ -1943,6 +1940,10 @@ int rv6xx_dpm_init(struct radeon_device *rdev) return -ENOMEM; rdev->pm.dpm.priv = pi; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = rv6xx_parse_power_table(rdev); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 6c772e58c78..da8703d8d45 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -801,7 +801,7 @@ u32 rv770_get_xclk(struct radeon_device *rdev) return reference_clock; } -u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = 
RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); @@ -835,9 +835,15 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) /* Unlock the lock, so double-buffering can take place inside vblank */ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); +} + +bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; /* Return current update_pending status: */ - return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; + return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & + AVIVO_D1GRPH_SURFACE_UPDATE_PENDING); } /* get temperature in millidegrees */ @@ -1321,6 +1327,9 @@ static void rv770_gpu_init(struct radeon_device *rdev) if (tmp < rdev->config.rv770.max_simds) { rdev->config.rv770.max_simds = tmp; } + tmp = rdev->config.rv770.max_simds - + r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); + rdev->config.rv770.active_simds = tmp; switch (rdev->config.rv770.max_tile_pipes) { case 1: @@ -1811,7 +1820,8 @@ int rv770_resume(struct radeon_device *rdev) /* init golden registers */ rv770_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = rv770_startup(rdev); @@ -1955,9 +1965,9 @@ void rv770_fini(struct radeon_device *rdev) radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - rv770_pcie_gart_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + rv770_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index aca8cbe8a33..bbf2e076ee4 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 80c595aba35..3c76e1dcdf0 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct rv7xx_ps *ps = rv770_get_ps(rps); u32 sclk, mclk; - u16 vddc; struct rv7xx_pl *pl; switch (index) { @@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, /* patch up vddc if necessary */ if (pl->vddc == 0xff01) { - if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) - pl->vddc = vddc; + if (pi->max_vddc) + pl->vddc = pi->max_vddc; } if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { @@ -2282,9 +2281,6 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.dpm.ps) return -ENOMEM; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < power_info->pplib.ucNumStates; i++) { power_state = (union pplib_power_state *) @@ -2333,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) pi->mclk_ss = 
radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); - /* disable ss, causes hangs on some cayman boards */ - if (rdev->family == CHIP_CAYMAN) { - pi->sclk_ss = false; - pi->mclk_ss = false; - } - if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else @@ -2362,6 +2352,10 @@ int rv770_dpm_init(struct radeon_device *rdev) pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = rv7xx_parse_power_table(rdev); if (ret) return ret; @@ -2527,14 +2521,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) { u32 vblank_time = r600_dpm_get_vblank_time(rdev); - u32 switch_limit = 300; - - /* quirks */ - /* ASUS K70AF */ - if ((rdev->pdev->device == 0x9553) && - (rdev->pdev->subsystem_vendor == 0x1043) && - (rdev->pdev->subsystem_device == 0x1c42)) - switch_limit = 200; + u32 switch_limit = 200; /* 300 */ /* RV770 */ /* mclk switching doesn't seem to work reliably on desktop RV770s */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 09ec4f6c53b..9e854fd016d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -39,33 +39,39 @@ MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); MODULE_FIRMWARE("radeon/TAHITI_me.bin"); MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); +MODULE_FIRMWARE("radeon/TAHITI_mc2.bin"); MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); MODULE_FIRMWARE("radeon/TAHITI_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); +MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin"); MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); MODULE_FIRMWARE("radeon/VERDE_ce.bin"); MODULE_FIRMWARE("radeon/VERDE_mc.bin"); +MODULE_FIRMWARE("radeon/VERDE_mc2.bin"); MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); MODULE_FIRMWARE("radeon/VERDE_smc.bin"); MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); +MODULE_FIRMWARE("radeon/OLAND_mc2.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); MODULE_FIRMWARE("radeon/OLAND_smc.bin"); MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); MODULE_FIRMWARE("radeon/HAINAN_me.bin"); MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_mc2.bin"); MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); +static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); static void si_program_aspm(struct radeon_device *rdev); extern void sumo_rlc_fini(struct radeon_device *rdev); @@ -1467,36 +1473,33 @@ int si_mc_load_microcode(struct radeon_device *rdev) const __be32 *fw_data; u32 running, blackout = 0; u32 *io_mc_regs; - int i, ucode_size, regs_size; + int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; + ucode_size = rdev->mc_fw->size / 4; + switch (rdev->family) { case CHIP_TAHITI: io_mc_regs = (u32 *)&tahiti_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_PITCAIRN: io_mc_regs = (u32 *)&pitcairn_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case 
CHIP_VERDE: default: io_mc_regs = (u32 *)&verde_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_OLAND: io_mc_regs = (u32 *)&oland_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_HAINAN: io_mc_regs = (u32 *)&hainan_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; } @@ -1552,7 +1555,7 @@ static int si_init_microcode(struct radeon_device *rdev) const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; - size_t smc_req_size; + size_t smc_req_size, mc2_req_size; char fw_name[30]; int err; @@ -1567,6 +1570,7 @@ static int si_init_microcode(struct radeon_device *rdev) ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = TAHITI_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4); break; case CHIP_PITCAIRN: @@ -1577,6 +1581,7 @@ static int si_init_microcode(struct radeon_device *rdev) ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4); break; case CHIP_VERDE: @@ -1587,6 +1592,7 @@ static int si_init_microcode(struct radeon_device *rdev) ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = VERDE_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4); break; case CHIP_OLAND: @@ -1596,7 +1602,7 @@ static int si_init_microcode(struct radeon_device *rdev) me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4); break; case CHIP_HAINAN: @@ -1606,7 +1612,7 @@ static int si_init_microcode(struct radeon_device *rdev) me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4); break; default: BUG(); @@ -1659,16 +1665,22 @@ static int si_init_microcode(struct radeon_device *rdev) err = -EINVAL; } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->mc_fw->size != mc_req_size) { + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)) { printk(KERN_ERR "si_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); err = -EINVAL; } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); @@ -2889,7 +2901,7 @@ static void si_gpu_init(struct radeon_device *rdev) u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; - int i, j; + int i, j, k; switch (rdev->family) { case CHIP_TAHITI: @@ -3087,6 +3099,14 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.max_sh_per_se, 
rdev->config.si.max_cu_per_sh); + for (i = 0; i < rdev->config.si.max_shader_engines; i++) { + for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { + for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { + rdev->config.si.active_cus += + hweight32(si_get_cu_active_bitmap(rdev, i, j)); + } + } + } /* set HW defaults for 3D engine */ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | @@ -3175,7 +3195,7 @@ void si_fence_ring_emit(struct radeon_device *rdev, /* EVENT_WRITE_EOP - flush caches, send int */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); @@ -3208,7 +3228,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, (1 << 8)); radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); radeon_ring_write(ring, next_rptr); } @@ -3434,8 +3454,6 @@ static int si_cp_resume(struct radeon_device *rdev) WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB0_RPTR); - /* ring1 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; @@ -3460,8 +3478,6 @@ static int si_cp_resume(struct radeon_device *rdev) WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB1_RPTR); - /* ring2 - compute only */ /* Set ring buffer size */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; @@ -3486,8 +3502,6 @@ static int si_cp_resume(struct radeon_device *rdev) WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); - ring->rptr = RREG32(CP_RB2_RPTR); - /* start the rings */ si_cp_start(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true; @@ -3872,11 +3886,9 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) if (!(reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -4041,18 +4053,21 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | ENABLE_L1_TLB | + ENABLE_L1_FRAGMENT_PROCESSING | SYSTEM_ACCESS_MODE_NOT_IN_SYS | ENABLE_ADVANCED_DRIVER_MODEL | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | + ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | EFFECTIVE_L2_QUEUE_SIZE(7) | CONTEXT1_IDENTITY_ACCESS_MODE(1)); WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | - L2_CACHE_BIGK_FRAGMENT_SIZE(0)); + BANK_SELECT(4) | + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); @@ -4089,6 +4104,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 4); WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT 
| PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) | RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | @@ -5777,7 +5793,6 @@ int si_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; u32 thermal_int = 0; @@ -5916,16 +5931,22 @@ int si_irq_set(struct radeon_device *rdev) } if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (!ASIC_IS_NODCE(rdev)) { @@ -6082,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -6143,7 +6165,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[0])) - radeon_crtc_handle_flip(rdev, 0); + radeon_crtc_handle_vblank(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -6169,7 +6191,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[1])) - radeon_crtc_handle_flip(rdev, 1); + radeon_crtc_handle_vblank(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } @@ -6195,7 +6217,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[2])) - radeon_crtc_handle_flip(rdev, 2); + radeon_crtc_handle_vblank(rdev, 2); rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } @@ -6221,7 +6243,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[3])) - radeon_crtc_handle_flip(rdev, 3); + radeon_crtc_handle_vblank(rdev, 3); rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } @@ -6247,7 +6269,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[4])) - radeon_crtc_handle_flip(rdev, 4); + radeon_crtc_handle_vblank(rdev, 4); rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } @@ -6273,7 +6295,7 @@ restart_ih: wake_up(&rdev->irq.vblank_queue); } if (atomic_read(&rdev->irq.pflip[5])) - radeon_crtc_handle_flip(rdev, 5); + radeon_crtc_handle_vblank(rdev, 5); rdev->irq.stat_regs.evergreen.disp_int_cont5 &= 
~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } @@ -6289,6 +6311,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -6338,18 +6369,24 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 146: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + /* reset addr and status */ + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + if (addr == 0x0 && status == 0x0) + break; dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", addr); dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); si_vm_decode_fault(rdev, status, addr); - /* reset addr and status */ - WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); break; case 176: /* RINGID0 CP_INT */ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); @@ -6614,7 +6651,8 @@ int si_resume(struct radeon_device *rdev) /* init golden registers */ si_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = si_startup(rdev); diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 59be2cfcbb4..e24c94b6d14 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -49,11 +49,9 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) mask = RADEON_RESET_DMA1; if (!(reset_mask & mask)) { - radeon_ring_lockup_update(ring); + radeon_ring_lockup_update(rdev, ring); return false; } - /* force ring activities */ - radeon_ring_force_activity(rdev, ring); return radeon_ring_test_lockup(rdev, ring); } @@ -81,7 +79,25 @@ void si_dma_vm_set_page(struct radeon_device *rdev, trace_radeon_vm_set_page(pe, addr, count, incr, flags); - if (flags & R600_PTE_SYSTEM) { + if (flags == R600_PTE_GART) { + uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; + while (count) { + unsigned bytes = count * 8; + if (bytes > 0xFFFF8) + bytes = 0xFFFF8; + + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, + 1, 0, 0, bytes); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; + + pe += bytes; + src += bytes; + count -= bytes / 8; + } + } else if (flags & R600_PTE_SYSTEM) { while (count) { ndw = count * 2; if (ndw > 0xFFFFE) @@ -204,8 +220,8 @@ int si_copy_dma(struct radeon_device *rdev, cur_size_in_bytes = 0xFFFFF; size_in_bytes -= cur_size_in_bytes; radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); - radeon_ring_write(ring, dst_offset & 0xffffffff); - radeon_ring_write(ring, src_offset & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(dst_offset)); + radeon_ring_write(ring, lower_32_bits(src_offset)); radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); src_offset += cur_size_in_bytes; @@ -215,6 +231,7 @@ int si_copy_dma(struct radeon_device *rdev, r = 
radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0471501338f..58918868f89 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1948,6 +1948,10 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_cape_verde; break; + case 0x682C: + si_pi->cac_weights = cac_weights_cape_verde_pro; + si_pi->dte_data = dte_data_sun_xt; + break; case 0x6825: case 0x6827: si_pi->cac_weights = cac_weights_heathrow; @@ -1971,10 +1975,9 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) si_pi->dte_data = dte_data_venus_xt; break; case 0x6823: - si_pi->cac_weights = cac_weights_chelsea_pro; - si_pi->dte_data = dte_data_venus_pro; - break; case 0x682B: + case 0x6822: + case 0x682A: si_pi->cac_weights = cac_weights_chelsea_pro; si_pi->dte_data = dte_data_venus_pro; break; @@ -1988,6 +1991,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x6601: case 0x6621: case 0x6603: + case 0x6605: si_pi->cac_weights = cac_weights_mars_pro; si_pi->lcac_config = lcac_mars_pro; si_pi->cac_override = cac_override_oland; @@ -1998,6 +2002,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x6600: case 0x6606: case 0x6620: + case 0x6604: si_pi->cac_weights = cac_weights_mars_xt; si_pi->lcac_config = lcac_mars_pro; si_pi->cac_override = cac_override_oland; @@ -2006,6 +2011,8 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) update_dte_from_pl2 = true; break; case 0x6611: + case 0x6613: + case 0x6608: si_pi->cac_weights = cac_weights_oland_pro; si_pi->lcac_config = lcac_mars_pro; si_pi->cac_override = cac_override_oland; @@ -2395,7 +2402,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev, if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -6271,9 +6278,6 @@ static int si_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -6350,6 +6354,10 @@ int si_dpm_init(struct radeon_device *rdev) pi->min_vddc_in_table = 0; pi->max_vddc_in_table = 0; + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = si_parse_power_table(rdev); if (ret) return ret; @@ -6472,7 +6480,8 @@ void si_dpm_fini(struct radeon_device *rdev) void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; struct ni_ps *ps = ni_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = diff --git 
a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 9239a6d2912..fd414d34d88 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -107,8 +107,8 @@ #define SPLL_CHG_STATUS (1 << 1) #define SPLL_CNTL_MODE 0x618 #define SPLL_SW_DIR_CONTROL (1 << 0) -# define SPLL_REFCLK_SEL(x) ((x) << 8) -# define SPLL_REFCLK_SEL_MASK 0xFF00 +# define SPLL_REFCLK_SEL(x) ((x) << 26) +# define SPLL_REFCLK_SEL_MASK (3 << 26) #define CG_SPLL_SPREAD_SPECTRUM 0x620 #define SSEN (1 << 0) @@ -362,6 +362,7 @@ #define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) +#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24) #define VM_CONTEXT1_CNTL 0x1414 #define VM_CONTEXT0_CNTL2 0x1430 #define VM_CONTEXT1_CNTL2 0x1434 @@ -1798,4 +1799,51 @@ #define DMA_PACKET_CONSTANT_FILL 0xd #define DMA_PACKET_NOP 0xf +#define VCE_STATUS 0x20004 +#define VCE_VCPU_CNTL 0x20014 +#define VCE_CLK_EN (1 << 0) +#define VCE_VCPU_CACHE_OFFSET0 0x20024 +#define VCE_VCPU_CACHE_SIZE0 0x20028 +#define VCE_VCPU_CACHE_OFFSET1 0x2002c +#define VCE_VCPU_CACHE_SIZE1 0x20030 +#define VCE_VCPU_CACHE_OFFSET2 0x20034 +#define VCE_VCPU_CACHE_SIZE2 0x20038 +#define VCE_SOFT_RESET 0x20120 +#define VCE_ECPU_SOFT_RESET (1 << 0) +#define VCE_FME_SOFT_RESET (1 << 2) +#define VCE_RB_BASE_LO2 0x2016c +#define VCE_RB_BASE_HI2 0x20170 +#define VCE_RB_SIZE2 0x20174 +#define VCE_RB_RPTR2 0x20178 +#define VCE_RB_WPTR2 0x2017c +#define VCE_RB_BASE_LO 0x20180 +#define VCE_RB_BASE_HI 0x20184 +#define VCE_RB_SIZE 0x20188 +#define VCE_RB_RPTR 0x2018c +#define VCE_RB_WPTR 0x20190 +#define VCE_CLOCK_GATING_A 0x202f8 +#define VCE_CLOCK_GATING_B 0x202fc +#define VCE_UENC_CLOCK_GATING 0x205bc +#define VCE_UENC_REG_CLOCK_GATING 0x205c0 +#define VCE_FW_REG_STATUS 0x20e10 +# define VCE_FW_REG_STATUS_BUSY (1 << 0) +# define VCE_FW_REG_STATUS_PASS (1 << 3) +# define VCE_FW_REG_STATUS_DONE (1 << 11) +#define VCE_LMI_FW_START_KEYSEL 0x20e18 +#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20 +#define VCE_LMI_CTRL2 0x20e74 +#define VCE_LMI_CTRL 0x20e98 +#define VCE_LMI_VM_CTRL 0x20ea0 +#define VCE_LMI_SWAP_CNTL 0x20eb4 +#define VCE_LMI_SWAP_CNTL1 0x20eb8 +#define VCE_LMI_CACHE_CTRL 0x20ef4 + +#define VCE_CMD_NO_OP 0x00000000 +#define VCE_CMD_END 0x00000001 +#define VCE_CMD_IB 0x00000002 +#define VCE_CMD_FENCE 0x00000003 +#define VCE_CMD_TRAP 0x00000004 +#define VCE_CMD_IB_AUTO 0x00000005 +#define VCE_CMD_SEMAPHORE 0x00000006 + #endif diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index f121efe12dc..3f0e8d7b8db 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1484,9 +1484,6 @@ static int sumo_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -1772,6 +1769,10 @@ int sumo_dpm_init(struct radeon_device *rdev) sumo_construct_boot_and_acpi_state(rdev); + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = sumo_parse_power_table(rdev); if (ret) return ret; @@ -1807,7 +1808,7 @@ void 
sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev struct seq_file *m) { struct sumo_power_info *pi = sumo_get_pi(rdev); - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct radeon_ps *rps = &pi->current_rps; struct sumo_ps *ps = sumo_get_ps(rps); struct sumo_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2d447192d6f..32e50be9c4a 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1694,9 +1694,6 @@ static int trinity_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; - rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -1877,7 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) pi->at[i] = TRINITY_AT_DFLT; - pi->enable_bapm = false; + /* There are stability issues reported with + * bapm enabled when switching between AC and battery + * power. At the same time, some MSI boards hang + * if it's not enabled and dpm is enabled. Just enable + * it for MSI boards right now. + */ + if (rdev->pdev->subsystem_vendor == 0x1462) + pi->enable_bapm = true; + else + pi->enable_bapm = false; pi->enable_nbps_policy = true; pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; @@ -1895,6 +1901,10 @@ int trinity_dpm_init(struct radeon_device *rdev) trinity_construct_boot_state(rdev); + ret = r600_get_platform_caps(rdev); + if (ret) + return ret; + ret = trinity_parse_power_table(rdev); if (ret) return ret; @@ -1926,7 +1936,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev, void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct trinity_power_info *pi = trinity_get_pi(rdev); + struct radeon_ps *rps = &pi->current_rps; struct trinity_ps *ps = trinity_get_ps(rps); struct trinity_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index d4a68af1a27..be42c812520 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev) int r; /* raise clocks while booting up the VCPU */ - radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + radeon_set_uvd_clocks(rdev, 10000, 10000); + else + radeon_set_uvd_clocks(rdev, 53300, 40000); r = uvd_v1_0_start(rdev); if (r) @@ -262,7 +265,7 @@ int uvd_v1_0_start(struct radeon_device *rdev) /* Initialize the ring buffer's read and write pointers */ WREG32(UVD_RBC_RB_RPTR, 0x0); - ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR); + ring->wptr = RREG32(UVD_RBC_RB_RPTR); WREG32(UVD_RBC_RB_WPTR, ring->wptr); /* set the ring address */ @@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) struct radeon_fence *fence = NULL; int r; - r = radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + r = radeon_set_uvd_clocks(rdev, 10000, 10000); + else + r = radeon_set_uvd_clocks(rdev, 53300, 40000); if (r) { 
DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 824550db3fe..8bfdadd5659 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -45,7 +45,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); - radeon_ring_write(ring, addr & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); radeon_ring_write(ring, upper_32_bits(addr) & 0xff); radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); @@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); radeon_ring_write(ring, 2); - return; } /** diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c new file mode 100644 index 00000000000..b44d9c842f7 --- /dev/null +++ b/drivers/gpu/drm/radeon/vce_v1_0.c @@ -0,0 +1,187 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
new file mode 100644
index 00000000000..b44d9c842f7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+/**
+ * vce_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		return RREG32(VCE_RB_RPTR);
+	else
+		return RREG32(VCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
+			   struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		return RREG32(VCE_RB_WPTR);
+	else
+		return RREG32(VCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void vce_v1_0_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
+		WREG32(VCE_RB_WPTR, ring->wptr);
+	else
+		WREG32(VCE_RB_WPTR2, ring->wptr);
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the VCE block
+ */
+int vce_v1_0_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int i, j, r;
+
+	/* set BUSY flag */
+	WREG32_P(VCE_STATUS, 1, ~1);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	WREG32(VCE_RB_RPTR, ring->wptr);
+	WREG32(VCE_RB_WPTR, ring->wptr);
+	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
+	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+	WREG32(VCE_RB_SIZE, ring->ring_size / 4);
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	WREG32(VCE_RB_RPTR2, ring->wptr);
+	WREG32(VCE_RB_WPTR2, ring->wptr);
+	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
+	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);
+
+	WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);
+
+	WREG32_P(VCE_SOFT_RESET,
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET, ~(
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET));
+
+	mdelay(100);
+
+	WREG32_P(VCE_SOFT_RESET, 0, ~(
+		 VCE_ECPU_SOFT_RESET |
+		 VCE_FME_SOFT_RESET));
+
+	for (i = 0; i < 10; ++i) {
+		uint32_t status;
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(VCE_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+		WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);
+		mdelay(10);
+		WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);
+		mdelay(10);
+		r = -1;
+	}
+
+	/* clear BUSY flag */
+	WREG32_P(VCE_STATUS, 0, ~1);
+
+	if (r) {
+		DRM_ERROR("VCE not responding, giving up!!!\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int vce_v1_0_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	r = vce_v1_0_start(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+	ring->ready = true;
+	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+	ring->ready = true;
+	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	DRM_INFO("VCE initialized successfully.\n");
+
+	return 0;
+}
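vce_v1_0_start() above uses a two-level wait: poll VCE_STATUS bit 1 for up to roughly a second, and on timeout soft-reset the ECPU and try again, at most ten times, before giving up. The shape generalizes; a sketch of the same bounded poll-and-recover loop factored into a helper (poll_with_recovery and its callback are hypothetical, not part of the patch):

/* Poll @reg until @bit is set; between failed attempts, run the
 * caller's @recover step (vce_v1_0_start() toggles
 * VCE_ECPU_SOFT_RESET there).  Mirrors the loop bounds above:
 * 100 x 10 ms polls per attempt, @attempts attempts overall.
 */
static int poll_with_recovery(struct radeon_device *rdev, u32 reg, u32 bit,
                              int attempts,
                              void (*recover)(struct radeon_device *))
{
        int i, j;

        for (i = 0; i < attempts; ++i) {
                for (j = 0; j < 100; ++j) {
                        if (RREG32(reg) & bit)
                                return 0;
                        mdelay(10);
                }
                recover(rdev);
        }
        return -ETIMEDOUT;
}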
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
new file mode 100644
index 00000000000..1ac7bb825a1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
+{
+	u32 tmp;
+
+	if (gated) {
+		tmp = RREG32(VCE_CLOCK_GATING_B);
+		tmp |= 0xe70000;
+		WREG32(VCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0xff000000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp &= ~0x3fc;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+	} else {
+		tmp = RREG32(VCE_CLOCK_GATING_B);
+		tmp |= 0xe7;
+		tmp &= ~0xe70000;
+		WREG32(VCE_CLOCK_GATING_B, tmp);
+
+		tmp = RREG32(VCE_UENC_CLOCK_GATING);
+		tmp |= 0x1fe000;
+		tmp &= ~0xff000000;
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+		tmp |= 0x3fc;
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+	}
+}
+
+static void vce_v2_0_set_dyn_cg(struct radeon_device *rdev, bool gated)
+{
+	u32 orig, tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp &= ~0x00060006;
+	if (gated) {
+		tmp |= 0xe10000;
+	} else {
+		tmp |= 0xe1;
+		tmp &= ~0xe10000;
+	}
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+
+	orig = tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~0x1fe000;
+	tmp &= ~0xff000000;
+	if (tmp != orig)
+		WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	orig = tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
+	tmp &= ~0x3fc;
+	if (tmp != orig)
+		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
+
+	if (gated)
+		WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
+}
+
+static void vce_v2_0_disable_cg(struct radeon_device *rdev)
+{
+	WREG32(VCE_CGTT_CLK_OVERRIDE, 7);
+}
+
+void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+	bool sw_cg = false;
+
+	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(rdev, true);
+		else
+			vce_v2_0_set_dyn_cg(rdev, true);
+	} else {
+		vce_v2_0_disable_cg(rdev);
+
+		if (sw_cg)
+			vce_v2_0_set_sw_cg(rdev, false);
+		else
+			vce_v2_0_set_dyn_cg(rdev, false);
+	}
+}
+
+static void vce_v2_0_init_cg(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(VCE_CLOCK_GATING_A);
+	tmp &= ~(CGC_CLK_GATE_DLY_TIMER_MASK | CGC_CLK_GATER_OFF_DLY_TIMER_MASK);
+	tmp |= (CGC_CLK_GATE_DLY_TIMER(0) | CGC_CLK_GATER_OFF_DLY_TIMER(4));
+	tmp |= CGC_UENC_WAIT_AWAKE;
+	WREG32(VCE_CLOCK_GATING_A, tmp);
+
+	tmp = RREG32(VCE_UENC_CLOCK_GATING);
+	tmp &= ~(CLOCK_ON_DELAY_MASK | CLOCK_OFF_DELAY_MASK);
+	tmp |= (CLOCK_ON_DELAY(0) | CLOCK_OFF_DELAY(4));
+	WREG32(VCE_UENC_CLOCK_GATING, tmp);
+
+	tmp = RREG32(VCE_CLOCK_GATING_B);
+	tmp |= 0x10;
+	tmp &= ~0x100000;
+	WREG32(VCE_CLOCK_GATING_B, tmp);
+}
+
+int vce_v2_0_resume(struct radeon_device *rdev)
+{
+	uint64_t addr = rdev->vce.gpu_addr;
+	uint32_t size;
+
+	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
+	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+	WREG32(VCE_CLOCK_GATING_B, 0xf7);
+
+	WREG32(VCE_LMI_CTRL, 0x00398000);
+	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+	WREG32(VCE_LMI_SWAP_CNTL, 0);
+	WREG32(VCE_LMI_SWAP_CNTL1, 0);
+	WREG32(VCE_LMI_VM_CTRL, 0);
+
+	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
+	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = RADEON_VCE_STACK_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = RADEON_VCE_HEAP_SIZE;
+	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
+	WREG32(VCE_VCPU_CACHE_SIZE2, size);
+
+	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);
+
+	WREG32_P(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN,
+		 ~VCE_SYS_INT_TRAP_INTERRUPT_EN);
+
+	vce_v2_0_init_cg(rdev);
+
+	return 0;
+}
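Both new VCE files lean heavily on WREG32_P(reg, val, mask), radeon's masked register write, and its semantics are easy to misread: bits set in the mask are preserved from the current register contents, and val supplies the bits where the mask is clear, so WREG32_P(VCE_STATUS, 1, ~1) touches only bit 0. Written out as a plain function for clarity (the driver implements this as a macro over RREG32/WREG32; the function form and name here are illustrative):

static void wreg32_masked(struct radeon_device *rdev, u32 reg,
                          u32 val, u32 mask)
{
        u32 tmp = RREG32(reg);

        tmp &= mask;            /* keep the bits covered by mask */
        tmp |= val & ~mask;     /* take the remaining bits from val */
        WREG32(reg, tmp);
}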
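vce_v2_0_resume() programs three back-to-back VCPU cache windows out of the single VCE buffer object: the page-aligned firmware image, then a fixed stack region, then a fixed heap. A sketch of just that layout arithmetic, zero-based for clarity (the driver actually programs the low 31 bits of the real GPU address, and struct vce_region is illustrative, not a driver type):

struct vce_region {
        u64 offset;     /* what lands in VCE_VCPU_CACHE_OFFSETn */
        u32 size;       /* what lands in VCE_VCPU_CACHE_SIZEn */
};

/* Layout: [ firmware | stack | heap ], each region starting where
 * the previous one ends.
 */
static void vce_compute_layout(u32 fw_size, struct vce_region r[3])
{
        u64 addr = 0;

        r[0].offset = addr;
        r[0].size = RADEON_GPU_PAGE_ALIGN(fw_size);     /* firmware */
        addr += r[0].size;

        r[1].offset = addr;
        r[1].size = RADEON_VCE_STACK_SIZE;              /* stack */
        addr += r[1].size;

        r[2].offset = addr;
        r[2].size = RADEON_VCE_HEAP_SIZE;               /* heap */
}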
