-rw-r--r--  drivers/gpu/drm/radeon/Makefile           |    3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c    |  370
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c      |   62
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c        |  794
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h    |  176
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h           |   13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h      |   50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c  |   25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c    |   12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c   |    1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c    |   37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c    |   20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c   |   42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c  |  241
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h    |    5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h      |    6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h       |    2
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h           |    2
-rw-r--r--  include/drm/drm_pciids.h                  |   35
19 files changed, 1775 insertions(+), 121 deletions(-)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b937b1e..83c59079193 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -54,7 +54,8 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bb45517719a..7e7c0b32bb6 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -249,13 +249,17 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
atombios_blank_crtc(crtc, 0);
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ /* XXX re-enable when interrupt support is added */
+ if (!ASIC_IS_DCE4(rdev))
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ /* XXX re-enable when interrupt support is added */
+ if (!ASIC_IS_DCE4(rdev))
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
@@ -367,6 +371,10 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
uint16_t percentage = 0;
uint8_t type = 0, step = 0, delay = 0, range = 0;
+ /* XXX add ss support for DCE4 */
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -411,6 +419,7 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
union adjust_pixel_clock {
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
};
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -422,6 +431,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
+ int encoder_mode = 0;
/* reset the pll flags */
pll->flags = 0;
@@ -459,6 +469,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -484,14 +495,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
*/
if (ASIC_IS_DCE3(rdev)) {
union adjust_pixel_clock args;
- struct radeon_encoder_atom_dig *dig;
u8 frev, crev;
int index;
- if (!radeon_encoder->enc_priv)
- return adjusted_clock;
- dig = radeon_encoder->enc_priv;
-
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
@@ -505,12 +511,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
case 2:
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
- args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ args.v1.ucEncodeMode = encoder_mode;
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
+ case 3:
+ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v3.sInput.ucEncodeMode = encoder_mode;
+ args.v3.sInput.ucDispPllConfig = 0;
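+ /* describe the link (coherent mode, dual link, spread spectrum) so the table can adjust the clock accordingly */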
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ else {
+ if (dig->coherent_mode)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ /* may want to enable SS on DP/eDP eventually */
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
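+ /* the v3 table may also hand back fixed reference and post dividers to use */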
+ if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ pll->reference_div = args.v3.sOutput.ucRefDiv;
+ }
+ if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_POST_DIV;
+ pll->post_div = args.v3.sOutput.ucPostDiv;
+ }
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return adjusted_clock;
@@ -529,9 +574,47 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS v1;
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
+ PIXEL_CLOCK_PARAMETERS_V5 v5;
};
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
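+/* on DCE4 a separate DCPLL generates the display clock (DISPCLK) */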
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 5:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v5.ucCRTC = ATOM_CRTC_INVALID;
+ args.v5.usPixelClock = rdev->clock.default_dispclk;
+ args.v5.ucPpll = ATOM_DCPLL;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -545,12 +628,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
+ int encoder_mode = 0;
memset(&args, 0, sizeof(args));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
@@ -558,10 +643,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!radeon_encoder)
return;
- if (radeon_crtc->crtc_id == 0)
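+ /* use the PLL picked for this crtc by radeon_atom_pick_pll() */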
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
- else
+ break;
+ case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
@@ -582,8 +675,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
- args.v1.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucPpll = radeon_crtc->pll_id;
args.v1.ucCRTC = radeon_crtc->crtc_id;
args.v1.ucRefDivSrc = 1;
break;
@@ -593,8 +685,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
- args.v2.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v2.ucPpll = radeon_crtc->pll_id;
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucRefDivSrc = 1;
break;
@@ -604,12 +695,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
- args.v3.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucPpll = radeon_crtc->pll_id;
+ args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
- args.v3.ucEncoderMode =
- atombios_get_encoder_mode(encoder);
+ args.v3.ucEncoderMode = encoder_mode;
+ break;
+ case 5:
+ args.v5.ucCRTC = radeon_crtc->crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v5.ucRefDiv = ref_div;
+ args.v5.usFbDiv = cpu_to_le16(fb_div);
+ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v5.ucPostDiv = post_div;
+ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v5.ucEncoderMode = encoder_mode;
+ args.v5.ucPpll = radeon_crtc->pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -624,6 +725,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+ /* Pin framebuffer & get tiling information */
+ obj = radeon_fb->obj;
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case 15:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ break;
+ case 16:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+ break;
+ case 24:
+ case 32:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ crtc->fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
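+ /* disable VGA emulation on the display controller feeding this crtc */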
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ break;
+ case 1:
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+ break;
+ case 2:
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ break;
+ case 3:
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ break;
+ case 4:
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ break;
+ case 5:
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+
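+ /* program the new scanout surface; the HIGH registers hold address bits above 32 */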
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ crtc->mode.vdisplay);
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+ if (old_fb && old_fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(old_fb);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
+
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -761,7 +996,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ return evergreen_crtc_set_base(crtc, x, y, old_fb);
+ else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_set_base(crtc, x, y, old_fb);
else
return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -791,6 +1028,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
}
}
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+ struct drm_crtc *test_crtc;
+ uint32_t pll_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ /* if crtc is driving DP and we have an ext clock, use that */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+ if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+ if (rdev->clock.dp_extclk)
+ return ATOM_PPLL_INVALID;
+ }
+ }
+ }
+
+ /* otherwise, pick one of the plls */
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_test_crtc;
+
+ if (crtc == test_crtc)
+ continue;
+
+ radeon_test_crtc = to_radeon_crtc(test_crtc);
+ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+ (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+ pll_in_use |= (1 << radeon_test_crtc->pll_id);
+ }
+ if (!(pll_in_use & 1))
+ return ATOM_PPLL1;
+ return ATOM_PPLL2;
+ } else
+ return radeon_crtc->crtc_id;
+
+}
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -802,19 +1079,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
/* TODO color tiling */
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
atombios_set_ss(crtc, 0);
+ /* always set DCPLL */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_set_ss(crtc, 1);
- atombios_crtc_set_timing(crtc, adjusted_mode);
- if (ASIC_IS_AVIVO(rdev))
- atombios_crtc_set_base(crtc, x, y, old_fb);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ else if (ASIC_IS_AVIVO(rdev))
+ atombios_crtc_set_timing(crtc, adjusted_mode);
else {
+ atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- atombios_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_fixup(crtc);
}
+ atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
return 0;
@@ -854,8 +1139,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc)
{
- if (radeon_crtc->crtc_id == 1)
- radeon_crtc->crtc_offset =
- AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ default:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ break;
+ }
+ } else {
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset =
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+ radeon_crtc->pll_id = -1;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 71060114d5d..0b6f2cef1c5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
train_set[lane] = v | p;
}
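+/* DCE4 uses the v2 AUX transaction layout, which adds the HPD line id */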
+union aux_channel_transaction {
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
- PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
@@ -339,29 +343,31 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
memcpy(base, req_bytes, num_bytes);
- args.lpAuxRequest = 0;
- args.lpDataOut = 16;
- args.ucDataOutLen = 0;
- args.ucChannelID = chan->rec.i2c_id;
- args.ucDelay = delay / 10;
+ args.v1.lpAuxRequest = 0;
+ args.v1.lpDataOut = 16;
+ args.v1.ucDataOutLen = 0;
+ args.v1.ucChannelID = chan->rec.i2c_id;
+ args.v1.ucDelay = delay / 10;
+ if (ASIC_IS_DCE4(rdev))
+ args.v2.ucHPD_ID = chan->rec.hpd_id;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus) {
+ if (args.v1.ucReplyStatus) {
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus);
+ chan->rec.i2c_id, args.v1.ucReplyStatus);
return false;
}
- if (args.ucDataOutLen && read_byte && read_buf_len) {
- if (read_buf_len < args.ucDataOutLen) {
+ if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.v1.ucDataOutLen) {
DRM_ERROR("Buffer to small for return answer %d %d\n",
- read_buf_len, args.ucDataOutLen);
+ read_buf_len, args.v1.ucDataOutLen);
return false;
}
{
- int len = min(read_buf_len, args.ucDataOutLen);
+ int len = min(read_buf_len, args.v1.ucDataOutLen);
memcpy(read_byte, base + 16, len);
}
}
@@ -622,12 +628,19 @@ void dp_link_train(struct drm_encoder *encoder,
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
- /* start training on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
- dig_connector->dp_clock, enc_id, 0);
- /* set training pattern 1 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* start training on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
+ /* set training pattern 1 on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
+ } else {
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+ }
/* set initial vs/emph */
memset(train_set, 0, 4);
@@ -687,8 +700,11 @@ void dp_link_train(struct drm_encoder *encoder,
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 1);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
/* channel equalization loop */
tries = 0;
@@ -725,7 +741,11 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
- dig_connector->dp_clock, enc_id, 0);
+ /* disable the training pattern on the source */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 00000000000..c2f9752e4ee
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+ /* XXX */
+ return connected;
+}
+
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ /* XXX */
+}
+
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(SRBM_STATUS) & 0x1F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
+/*
+ * GART
+ */
+int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
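+ /* point VM context 0 at the GART range and its page table */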
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ for (i = 1; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ r600_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i, r;
+
+ /* Disable all tables */
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+ evergreen_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+void evergreen_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
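+ /* save VGA and crtc state so it can be restored once the MC has been reprogrammed */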
+ save->vga_control[0] = RREG32(D1VGA_CONTROL);
+ save->vga_control[1] = RREG32(D2VGA_CONTROL);
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ /* Stop all video */
+ WREG32(VGA_RENDER_CONTROL, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);