Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_atombios.c')
-rw-r--r--   drivers/gpu/drm/radeon/radeon_atombios.c   1442
1 file changed, 1344 insertions, 98 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 9e72daeeddc..173f378428a 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -23,34 +23,17 @@ * Authors: Dave Airlie * Alex Deucher */ -#include "drmP.h" -#include "radeon_drm.h" +#include <drm/drmP.h> +#include <drm/radeon_drm.h> #include "radeon.h" #include "atom.h" #include "atom-bits.h" -/* from radeon_encoder.c */ -extern uint32_t -radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, - uint8_t dac); -extern void radeon_link_encoder_connector(struct drm_device *dev); extern void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device, u16 caps); -/* from radeon_connector.c */ -extern void -radeon_add_atom_connector(struct drm_device *dev, - uint32_t connector_id, - uint32_t supported_device, - int connector_type, - struct radeon_i2c_bus_rec *i2c_bus, - uint32_t igp_lane_info, - uint16_t connector_object_id, - struct radeon_hpd *hpd, - struct radeon_router *router); - /* from radeon_legacy_encoder.c */ extern void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, @@ -163,8 +146,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); + gpio = &i2c_info->asGPIO_Info[0]; for (i = 0; i < num_indices; i++) { - gpio = &i2c_info->asGPIO_Info[i]; radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); @@ -172,6 +155,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); break; } + gpio = (ATOM_GPIO_I2C_ASSIGMENT *) + ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); } } @@ -195,9 +180,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); + gpio = &i2c_info->asGPIO_Info[0]; for (i = 0; i < num_indices; i++) { - gpio = &i2c_info->asGPIO_Info[i]; - radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); @@ -206,12 +190,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } + gpio = (ATOM_GPIO_I2C_ASSIGMENT *) + ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); } } } static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, - u8 id) + u8 id) { struct atom_context *ctx = rdev->mode_info.atom_context; struct radeon_gpio_rec gpio; @@ -230,8 +216,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); + pin = gpio_info->asGPIO_Pin; for (i = 0; i < num_indices; i++) { - pin = &gpio_info->asGPIO_Pin[i]; if (id == pin->ucGPIO_ID) { gpio.id = pin->ucGPIO_ID; gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; @@ -239,6 +225,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, gpio.valid = true; break; } + pin = (ATOM_GPIO_PIN_ASSIGNMENT *) + ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT)); } } @@ -253,7 +241,9 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device memset(&hpd, 0, sizeof(struct radeon_hpd)); - if (ASIC_IS_DCE4(rdev)) + if (ASIC_IS_DCE6(rdev)) + reg = SI_DC_GPIO_HPD_A; + else if (ASIC_IS_DCE4(rdev)) reg = EVERGREEN_DC_GPIO_HPD_A; else reg = 
AVIVO_DC_GPIO_HPD_A; @@ -438,10 +428,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, */ if ((dev->pdev->device == 0x9498) && (dev->pdev->subsystem_vendor == 0x1682) && - (dev->pdev->subsystem_device == 0x2452)) { + (dev->pdev->subsystem_device == 0x2452) && + (i2c_bus->valid == false) && + !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) { struct radeon_device *rdev = dev->dev_private; *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); } + + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ + if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + *line_mux = 0x3103; + } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + } + } + + return true; } @@ -693,13 +699,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); + u8 *num_dst_objs = (u8 *) + ((u8 *)router_src_dst_table + 1 + + (router_src_dst_table->ucNumberOfSrc * 2)); + u16 *dst_objs = (u16 *)(num_dst_objs + 1); int enum_id; router.router_id = router_obj_id; - for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; - enum_id++) { + for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) { if (le16_to_cpu(path->usConnObjectId) == - le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) + le16_to_cpu(dst_objs[enum_id])) break; } @@ -1218,13 +1227,22 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) rdev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); if (rdev->clock.default_dispclk == 0) { - if (ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE6(rdev)) + rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + else if (ASIC_IS_DCE5(rdev)) rdev->clock.default_dispclk = 54000; /* 540 Mhz */ else rdev->clock.default_dispclk = 60000; /* 600 Mhz */ } + /* set a reasonable default for DP */ + if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + rdev->clock.default_dispclk / 100); + rdev->clock.default_dispclk = 60000; + } rdev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); + rdev->clock.current_dispclk = rdev->clock.default_dispclk; } *dcpll = *p1pll; @@ -1232,6 +1250,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) if (rdev->clock.max_pixel_clock == 0) rdev->clock.max_pixel_clock = 40000; + /* not technically a clock, but... 
*/ + rdev->mode_info.firmware_flags = + le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); + return true; } @@ -1241,6 +1263,9 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; }; bool radeon_atombios_sideport_present(struct radeon_device *rdev) @@ -1333,6 +1358,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); uint16_t data_offset, size; struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; + struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign; uint8_t frev, crev; int i, num_indices; @@ -1344,18 +1370,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); - + ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) + ((u8 *)&ss_info->asSS_Info[0]); for (i = 0; i < num_indices; i++) { - if (ss_info->asSS_Info[i].ucSS_Id == id) { + if (ss_assign->ucSS_Id == id) { ss->percentage = - le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); - ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; - ss->step = ss_info->asSS_Info[i].ucSS_Step; - ss->delay = ss_info->asSS_Info[i].ucSS_Delay; - ss->range = ss_info->asSS_Info[i].ucSS_Range; - ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; + le16_to_cpu(ss_assign->usSpreadSpectrumPercentage); + ss->type = ss_assign->ucSpreadSpectrumType; + ss->step = ss_assign->ucSS_Step; + ss->delay = ss_assign->ucSS_Delay; + ss->range = ss_assign->ucSS_Range; + ss->refdiv = ss_assign->ucRecommendedRef_Div; return true; } + ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) + ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT)); } } return false; @@ -1368,27 +1397,66 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); u16 data_offset, size; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + union igp_info *igp_info; u8 frev, crev; u16 percentage = 0, rate = 0; /* get any igp specific overrides */ if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { - igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + igp_info = (union igp_info *) (mode_info->atom_context->bios + data_offset); - switch (id) { - case ASIC_INTERNAL_SS_ON_TMDS: - percentage = le16_to_cpu(igp_info->usDVISSPercentage); - rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + switch (crev) { + case 6: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_HDMI: - percentage = le16_to_cpu(igp_info->usHDMISSPercentage); - rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + case 7: + switch (id) { + case 
ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_LVDS: - percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); - rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + case 8: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz); + break; + } + break; + default: + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); break; } if (percentage) @@ -1404,6 +1472,12 @@ union asic_ss_info { struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; }; +union asic_ss_assignment { + struct _ATOM_ASIC_SS_ASSIGNMENT v1; + struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2; + struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3; +}; + bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id, u32 clock) @@ -1412,9 +1486,19 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); uint16_t data_offset, size; union asic_ss_info *ss_info; + union asic_ss_assignment *ss_assign; uint8_t frev, crev; int i, num_indices; + if (id == ASIC_INTERNAL_MEMORY_SS) { + if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT)) + return false; + } + if (id == ASIC_INTERNAL_ENGINE_SS) { + if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT)) + return false; + } + memset(ss, 0, sizeof(struct radeon_atom_ss)); if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { @@ -1427,45 +1511,68 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT); + ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]); for (i = 0; i < num_indices; i++) { - if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { + if ((ss_assign->v1.ucClockIndication == id) && + (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) { ss->percentage = - le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); - ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; - ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); + le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage); + ss->type = ss_assign->v1.ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); + ss->percentage_divider = 100; return true; } + ss_assign = (union asic_ss_assignment *) + ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT)); } break; case 2: num_indices = (size - 
sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); + ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]); for (i = 0; i < num_indices; i++) { - if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { + if ((ss_assign->v2.ucClockIndication == id) && + (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) { ss->percentage = - le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); - ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; - ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); + le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage); + ss->type = ss_assign->v2.ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); + ss->percentage_divider = 100; + if ((crev == 2) && + ((id == ASIC_INTERNAL_ENGINE_SS) || + (id == ASIC_INTERNAL_MEMORY_SS))) + ss->rate /= 100; return true; } + ss_assign = (union asic_ss_assignment *) + ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2)); } break; case 3: num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); + ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]); for (i = 0; i < num_indices; i++) { - if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { + if ((ss_assign->v3.ucClockIndication == id) && + (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) { ss->percentage = - le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); - ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; - ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); + le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage); + ss->type = ss_assign->v3.ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); + if (ss_assign->v3.ucSpreadSpectrumMode & + SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK) + ss->percentage_divider = 1000; + else + ss->percentage_divider = 100; + if ((id == ASIC_INTERNAL_ENGINE_SS) || + (id == ASIC_INTERNAL_MEMORY_SS)) + ss->rate /= 100; if (rdev->flags & RADEON_IS_IGP) radeon_atombios_get_igp_ss_overrides(rdev, ss, id); return true; } + ss_assign = (union asic_ss_assignment *) + ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3)); } break; default: @@ -1600,7 +1707,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct kfree(edid); } } - record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); + record += fake_edid_record->ucFakeEDIDLength ? 
+ fake_edid_record->ucFakeEDIDLength + 2 : + sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; @@ -1698,7 +1807,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, if (misc & ATOM_DOUBLE_CLOCK_MODE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; - mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; + mode->crtc_clock = mode->clock = + le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; if (index == 1) { /* PAL timings appear to have wrong values for totals */ @@ -1741,7 +1851,8 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, if (misc & ATOM_DOUBLE_CLOCK_MODE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; - mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10; + mode->crtc_clock = mode->clock = + le16_to_cpu(dtd_timings->usPixClk) * 10; break; } return true; @@ -1874,6 +1985,9 @@ static const char *pp_lib_thermal_controller_names[] = { "emc2103", "Sumo", "Northern Islands", + "Southern Islands", + "lm96163", + "Sea Islands", }; union power_info { @@ -1890,6 +2004,8 @@ union pplib_clock_info { struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; + struct _ATOM_PPLIB_SI_CLOCK_INFO si; + struct _ATOM_PPLIB_CI_CLOCK_INFO ci; }; union pplib_power_state { @@ -1955,7 +2071,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); /* add the i2c bus for thermal/fan chip */ - if (power_info->info.ucOverdriveThermalController > 0) { + if ((power_info->info.ucOverdriveThermalController > 0) && + (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) { DRM_INFO("Possible %s thermal controller at 0x%02x\n", thermal_controller_names[power_info->info.ucOverdriveThermalController], power_info->info.ucOverdriveControllerAddress >> 1); @@ -1973,6 +2090,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) num_modes = power_info->info.ucNumOfPowerModeEntries; if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + if (num_modes == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; @@ -2147,6 +2266,21 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_NI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_SI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_CI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_KV; } else if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || (controller->ucType == @@ -2154,7 +2288,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { DRM_INFO("Special thermal controller config\n"); - } else { + } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, @@ -2169,12 +2303,18 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r strlcpy(info.type, name, sizeof(info.type)); i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } + } else { + DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + controller->ucType, + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); } } } -static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, - u16 *vddc, u16 *vddci) +void radeon_atombios_get_default_voltages(struct radeon_device *rdev, + u16 *vddc, u16 *vddci, u16 *mvdd) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); @@ -2184,6 +2324,7 @@ static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, *vddc = 0; *vddci = 0; + *mvdd = 0; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { @@ -2191,8 +2332,10 @@ static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, (union firmware_info *)(mode_info->atom_context->bios + data_offset); *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); - if ((frev == 2) && (crev >= 2)) + if ((frev == 2) && (crev >= 2)) { *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); + *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage); + } } } @@ -2203,9 +2346,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde int j; u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); u32 misc2 = le16_to_cpu(non_clock_info->usClassification); - u16 vddc, vddci; + u16 vddc, vddci, mvdd; - radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); + radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd); rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; @@ -2241,14 +2384,20 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; - if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { + if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) { /* NI chips post without MC ucode, so default clocks are strobe mode only */ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; } else { - /* patch the table values with the default slck/mclk from firmware info */ + u16 max_vddci = 0; + + if (ASIC_IS_DCE4(rdev)) + 
radeon_atom_get_max_voltage(rdev, + SET_VOLTAGE_TYPE_ASIC_VDDCI, + &max_vddci); + /* patch the table values with the default sclk/mclk from firmware info */ for (j = 0; j < mode_index; j++) { rdev->pm.power_state[state_index].clock_info[j].mclk = rdev->clock.default_mclk; @@ -2257,6 +2406,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde if (vddc) rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = vddc; + if (max_vddci) + rdev->pm.power_state[state_index].clock_info[j].voltage.vddci = + max_vddci; } } } @@ -2267,6 +2419,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, union pplib_clock_info *clock_info) { u32 sclk, mclk; + u16 vddc; if (rdev->flags & RADEON_IS_IGP) { if (rdev->family >= CHIP_PALM) { @@ -2278,7 +2431,29 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; } - } else if (ASIC_IS_DCE4(rdev)) { + } else if (rdev->family >= CHIP_BONAIRE) { + sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); + sclk |= clock_info->ci.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); + mclk |= clock_info->ci.ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_NONE; + } else if (rdev->family >= CHIP_TAHITI) { + sclk = le16_to_cpu(clock_info->si.usEngineClockLow); + sclk |= clock_info->si.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); + mclk |= clock_info->si.ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + le16_to_cpu(clock_info->si.usVDDC); + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = + le16_to_cpu(clock_info->si.usVDDCI); + } else if (rdev->family >= CHIP_CEDAR) { sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); sclk |= clock_info->evergreen.ucEngineClockHigh << 16; mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); @@ -2305,11 +2480,22 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, } /* patch up vddc if necessary */ - if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) { - u16 vddc; - - if (radeon_atom_get_max_vddc(rdev, &vddc) == 0) + switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) { + case ATOM_VIRTUAL_VOLTAGE_ID0: + case ATOM_VIRTUAL_VOLTAGE_ID1: + case ATOM_VIRTUAL_VOLTAGE_ID2: + case ATOM_VIRTUAL_VOLTAGE_ID3: + case ATOM_VIRTUAL_VOLTAGE_ID4: + case ATOM_VIRTUAL_VOLTAGE_ID5: + case ATOM_VIRTUAL_VOLTAGE_ID6: + case ATOM_VIRTUAL_VOLTAGE_ID7: + if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage, + &vddc) == 0) rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc; + break; + default: + break; } if (rdev->flags & RADEON_IS_IGP) { @@ -2345,6 +2531,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) power_info = (union power_info 
*)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + if (power_info->pplib.ucNumStates == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2419,14 +2607,15 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) int i, j, non_clock_array_index, clock_array_index; int state_index = 0, mode_index = 0; union pplib_clock_info *clock_info; - struct StateArray *state_array; - struct ClockInfoArray *clock_info_array; - struct NonClockInfoArray *non_clock_info_array; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; bool valid; union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; + u8 *power_state_offset; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) @@ -2434,24 +2623,26 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); - state_array = (struct StateArray *) + state_array = (struct _StateArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct ClockInfoArray *) + clock_info_array = (struct _ClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct NonClockInfoArray *) + non_clock_info_array = (struct _NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + if (state_array->ucNumEntries == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * state_array->ucNumEntries, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; + power_state_offset = (u8 *)state_array->states; for (i = 0; i < state_array->ucNumEntries; i++) { mode_index = 0; - power_state = (union pplib_power_state *)&state_array->states[i]; - /* XXX this might be an inagua bug... */ - non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * @@ -2463,11 +2654,8 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) if (power_state->v2.ucNumDPMLevels) { for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { clock_array_index = power_state->v2.clockInfoIndex[j]; - /* XXX this might be an inagua bug... 
*/ - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index]; + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; valid = radeon_atombios_parse_pplib_clock_info(rdev, state_index, mode_index, clock_info); @@ -2487,6 +2675,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_info); state_index++; } + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; } /* if multiple clock modes, mark the lowest as no display */ for (i = 0; i < state_index; i++) { @@ -2533,7 +2722,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) default: break; } - } else { + } + + if (state_index == 0) { rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { rdev->pm.power_state[0].clock_info = @@ -2567,6 +2758,184 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.current_vddc = 0; } +union get_clock_dividers { + struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1; + struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2; + struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3; + struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4; + struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5; + struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in; + struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out; +}; + +int radeon_atom_get_clock_dividers(struct radeon_device *rdev, + u8 clock_type, + u32 clock, + bool strobe_mode, + struct atom_clock_dividers *dividers) +{ + union get_clock_dividers args; + int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL); + u8 frev, crev; + + memset(&args, 0, sizeof(args)); + memset(dividers, 0, sizeof(struct atom_clock_dividers)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return -EINVAL; + + switch (crev) { + case 1: + /* r4xx, r5xx */ + args.v1.ucAction = clock_type; + args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */ + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_div = args.v1.ucPostDiv; + dividers->fb_div = args.v1.ucFbDiv; + dividers->enable_post_div = true; + break; + case 2: + case 3: + case 5: + /* r6xx, r7xx, evergreen, ni, si */ + if (rdev->family <= CHIP_RV770) { + args.v2.ucAction = clock_type; + args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */ + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_div = args.v2.ucPostDiv; + dividers->fb_div = le16_to_cpu(args.v2.usFbDiv); + dividers->ref_div = args.v2.ucAction; + if (rdev->family == CHIP_RV770) { + dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ? + true : false; + dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0; + } else + dividers->enable_post_div = (dividers->fb_div & 1) ? true : false; + } else { + if (clock_type == COMPUTE_ENGINE_PLL_PARAM) { + args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_div = args.v3.ucPostDiv; + dividers->enable_post_div = (args.v3.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; + dividers->enable_dithen = (args.v3.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? 
false : true; + dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); + dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); + dividers->ref_div = args.v3.ucRefDiv; + dividers->vco_mode = (args.v3.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0; + } else { + /* for SI we use ComputeMemoryClockParam for memory plls */ + if (rdev->family >= CHIP_TAHITI) + return -EINVAL; + args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock); + if (strobe_mode) + args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_div = args.v5.ucPostDiv; + dividers->enable_post_div = (args.v5.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; + dividers->enable_dithen = (args.v5.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; + dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv); + dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac); + dividers->ref_div = args.v5.ucRefDiv; + dividers->vco_mode = (args.v5.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0; + } + } + break; + case 4: + /* fusion */ + args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_divider = dividers->post_div = args.v4.ucPostDiv; + dividers->real_clock = le32_to_cpu(args.v4.ulClock); + break; + case 6: + /* CI */ + /* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */ + args.v6_in.ulClock.ulComputeClockFlag = clock_type; + args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */ + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv); + dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac); + dividers->ref_div = args.v6_out.ucPllRefDiv; + dividers->post_div = args.v6_out.ucPllPostDiv; + dividers->flags = args.v6_out.ucPllCntlFlag; + dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock); + dividers->post_divider = args.v6_out.ulClock.ucPostDiv; + break; + default: + return -EINVAL; + } + return 0; +} + +int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev, + u32 clock, + bool strobe_mode, + struct atom_mpll_param *mpll_param) +{ + COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args; + int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam); + u8 frev, crev; + + memset(&args, 0, sizeof(args)); + memset(mpll_param, 0, sizeof(struct atom_mpll_param)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return -EINVAL; + + switch (frev) { + case 2: + switch (crev) { + case 1: + /* SI */ + args.ulClock = cpu_to_le32(clock); /* 10 khz */ + args.ucInputFlag = 0; + if (strobe_mode) + args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac); + mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv); + mpll_param->post_div = args.ucPostDiv; + mpll_param->dll_speed = args.ucDllSpeed; + mpll_param->bwcntl = args.ucBWCntl; + mpll_param->vco_mode = + (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK); + mpll_param->yclk_sel = + (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0; + mpll_param->qdr = + (args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 
1 : 0; + mpll_param->half_rate = + (args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + return 0; +} + void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) { DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; @@ -2620,10 +2989,53 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev, + u32 eng_clock, u32 mem_clock) +{ + SET_ENGINE_CLOCK_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings); + u32 tmp; + + memset(&args, 0, sizeof(args)); + + tmp = eng_clock & SET_CLOCK_FREQ_MASK; + tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24); + + args.ulTargetEngineClock = cpu_to_le32(tmp); + if (mem_clock) + args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +void radeon_atom_update_memory_dll(struct radeon_device *rdev, + u32 mem_clock) +{ + u32 args; + int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings); + + args = cpu_to_le32(mem_clock); /* 10 khz */ + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +void radeon_atom_set_ac_timing(struct radeon_device *rdev, + u32 mem_clock) +{ + SET_MEMORY_CLOCK_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings); + u32 tmp = mem_clock | (COMPUTE_MEMORY_PLL_PARAM << 24); + + args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */ + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + union set_voltage { struct _SET_VOLTAGE_PS_ALLOCATION alloc; struct _SET_VOLTAGE_PARAMETERS v1; struct _SET_VOLTAGE_PARAMETERS_V2 v2; + struct _SET_VOLTAGE_PARAMETERS_V1_3 v3; }; void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) @@ -2650,6 +3062,11 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; args.v2.usVoltageLevel = cpu_to_le16(voltage_level); break; + case 3: + args.v3.ucVoltageType = voltage_type; + args.v3.ucVoltageMode = ATOM_SET_VOLTAGE; + args.v3.usVoltageLevel = cpu_to_le16(voltage_level); + break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); return; @@ -2658,8 +3075,8 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } -int radeon_atom_get_max_vddc(struct radeon_device *rdev, - u16 *voltage) +int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type, + u16 voltage_id, u16 *voltage) { union set_voltage args; int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); @@ -2680,6 +3097,176 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, *voltage = le16_to_cpu(args.v2.usVoltageLevel); break; + case 3: + args.v3.ucVoltageType = voltage_type; + args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL; + args.v3.usVoltageLevel = cpu_to_le16(voltage_id); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *voltage = le16_to_cpu(args.v3.usVoltageLevel); + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + + return 0; +} + +int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev, + u16 
*voltage, + u16 leakage_idx) +{ + return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); +} + +int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev, + u16 *leakage_id) +{ + union set_voltage args; + int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); + u8 frev, crev; + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return -EINVAL; + + switch (crev) { + case 3: + case 4: + args.v3.ucVoltageType = 0; + args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID; + args.v3.usVoltageLevel = 0; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *leakage_id = le16_to_cpu(args.v3.usVoltageLevel); + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + + return 0; +} + +int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev, + u16 *vddc, u16 *vddci, + u16 virtual_voltage_id, + u16 vbios_voltage_id) +{ + int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); + u8 frev, crev; + u16 data_offset, size; + int i, j; + ATOM_ASIC_PROFILING_INFO_V2_1 *profile; + u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; + + *vddc = 0; + *vddci = 0; + + if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) + return -EINVAL; + + profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) + (rdev->mode_info.atom_context->bios + data_offset); + + switch (frev) { + case 1: + return -EINVAL; + case 2: + switch (crev) { + case 1: + if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1)) + return -EINVAL; + leakage_bin = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usLeakageBinArrayOffset)); + vddc_id_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDC_IdArrayOffset)); + vddc_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDC_LevelArrayOffset)); + vddci_id_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDCI_IdArrayOffset)); + vddci_buf = (u16 *) + (rdev->mode_info.atom_context->bios + data_offset + + le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset)); + + if (profile->ucElbVDDC_Num > 0) { + for (i = 0; i < profile->ucElbVDDC_Num; i++) { + if (vddc_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (vbios_voltage_id <= leakage_bin[j]) { + *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; + break; + } + } + break; + } + } + } + if (profile->ucElbVDDCI_Num > 0) { + for (i = 0; i < profile->ucElbVDDCI_Num; i++) { + if (vddci_id_buf[i] == virtual_voltage_id) { + for (j = 0; j < profile->ucLeakageBinNum; j++) { + if (vbios_voltage_id <= leakage_bin[j]) { + *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; + break; + } + } + break; + } + } + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + + return 0; +} + +int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, + u16 voltage_level, u8 voltage_type, + u32 *gpio_value, u32 *gpio_mask) +{ + union set_voltage args; + int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); + u8 frev, crev; + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return -EINVAL; + + switch (crev) { + case 1: + return -EINVAL; + case 2: + 
args.v2.ucVoltageType = voltage_type; + args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK; + args.v2.usVoltageLevel = cpu_to_le16(voltage_level); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *gpio_mask = le32_to_cpu(*(u32 *)&args.v2); + + args.v2.ucVoltageType = voltage_type; + args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL; + args.v2.usVoltageLevel = cpu_to_le16(voltage_level); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *gpio_value = le32_to_cpu(*(u32 *)&args.v2); + break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); return -EINVAL; @@ -2688,6 +3275,658 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, return 0; } +union voltage_object_info { + struct _ATOM_VOLTAGE_OBJECT_INFO v1; + struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2; + struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3; +}; + +union voltage_object { + struct _ATOM_VOLTAGE_OBJECT v1; + struct _ATOM_VOLTAGE_OBJECT_V2 v2; + union _ATOM_VOLTAGE_OBJECT_V3 v3; +}; + +static ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object_v1(ATOM_VOLTAGE_OBJECT_INFO *v1, + u8 voltage_type) +{ + u32 size = le16_to_cpu(v1->sHeader.usStructureSize); + u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO, asVoltageObj[0]); + u8 *start = (u8 *)v1; + + while (offset < size) { + ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset); + if (vo->ucVoltageType == voltage_type) + return vo; + offset += offsetof(ATOM_VOLTAGE_OBJECT, asFormula.ucVIDAdjustEntries) + + vo->asFormula.ucNumOfVoltageEntries; + } + return NULL; +} + +static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT_INFO_V2 *v2, + u8 voltage_type) +{ + u32 size = le16_to_cpu(v2->sHeader.usStructureSize); + u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]); + u8 *start = (u8*)v2; + + while (offset < size) { + ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset); + if (vo->ucVoltageType == voltage_type) + return vo; + offset += offsetof(ATOM_VOLTAGE_OBJECT_V2, asFormula.asVIDAdjustEntries) + + (vo->asFormula.ucNumOfVoltageEntries * sizeof(VOLTAGE_LUT_ENTRY)); + } + return NULL; +} + +static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3, + u8 voltage_type, u8 voltage_mode) +{ + u32 size = le16_to_cpu(v3->sHeader.usStructureSize); + u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]); + u8 *start = (u8*)v3; + + while (offset < size) { + ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); + if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) && + (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode)) + return vo; + offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize); + } + return NULL; +} + +bool +radeon_atom_is_voltage_gpio(struct radeon_device *rdev, + u8 voltage_type, u8 voltage_mode) +{ + int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); + u8 frev, crev; + u16 data_offset, size; + union voltage_object_info *voltage_info; + union voltage_object *voltage_object = NULL; + + if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + voltage_info = (union voltage_object_info *) + (rdev->mode_info.atom_context->bios + data_offset); + + switch (frev) { + case 1: + case 2: + switch (crev) { + case 1: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type); + if (voltage_object && + 
(voltage_object->v1.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO)) + return true; + break; + case 2: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type); + if (voltage_object && + (voltage_object->v2.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO)) + return true; + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return false; + } + break; + case 3: + switch (crev) { + case 1: + if (atom_lookup_voltage_object_v3(&voltage_info->v3, + voltage_type, voltage_mode)) + return true; + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return false; + } + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return false; + } + + } + return false; +} + +int radeon_atom_get_max_voltage(struct radeon_device *rdev, + u8 voltage_type, u16 *max_voltage) +{ + int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); + u8 frev, crev; + u16 data_offset, size; + union voltage_object_info *voltage_info; + union voltage_object *voltage_object = NULL; + + if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + voltage_info = (union voltage_object_info *) + (rdev->mode_info.atom_context->bios + data_offset); + + switch (crev) { + case 1: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type); + if (voltage_object) { + ATOM_VOLTAGE_FORMULA *formula = + &voltage_object->v1.asFormula; + if (formula->ucFlag & 1) + *max_voltage = + le16_to_cpu(formula->usVoltageBaseLevel) + + formula->ucNumOfVoltageEntries / 2 * + le16_to_cpu(formula->usVoltageStep); + else + *max_voltage = + le16_to_cpu(formula->usVoltageBaseLevel) + + (formula->ucNumOfVoltageEntries - 1) * + le16_to_cpu(formula->usVoltageStep); + return 0; + } + break; + case 2: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type); + if (voltage_object) { + ATOM_VOLTAGE_FORMULA_V2 *formula = + &voltage_object->v2.asFormula; + if (formula->ucNumOfVoltageEntries) { + VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *) + ((u8 *)&formula->asVIDAdjustEntries[0] + + (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1))); + *max_voltage = + le16_to_cpu(lut->usVoltageValue); + return 0; + } + } + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return -EINVAL; + } + + } + return -EINVAL; +} + +int radeon_atom_get_min_voltage(struct radeon_device *rdev, + u8 voltage_type, u16 *min_voltage) +{ + int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); + u8 frev, crev; + u16 data_offset, size; + union voltage_object_info *voltage_info; + union voltage_object *voltage_object = NULL; + + if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + voltage_info = (union voltage_object_info *) + (rdev->mode_info.atom_context->bios + data_offset); + + switch (crev) { + case 1: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type); + if (voltage_object) { + ATOM_VOLTAGE_FORMULA *formula = + &voltage_object->v1.asFormula; + *min_voltage = + le16_to_cpu(formula->usVoltageBaseLevel); + return 0; + } + break; + case 2: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type); + if (voltage_object) { + ATOM_VOLTAGE_FORMULA_V2 *formula = + &voltage_object->v2.asFormula; + if (formula->ucNumOfVoltageEntries) { + 
					*min_voltage =
+						le16_to_cpu(formula->asVIDAdjustEntries[
+							0
+							].usVoltageValue);
+					return 0;
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return -EINVAL;
+}
+
+int radeon_atom_get_voltage_step(struct radeon_device *rdev,
+				 u8 voltage_type, u16 *voltage_step)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (crev) {
+		case 1:
+			voltage_object = (union voltage_object *)
+				atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
+			if (voltage_object) {
+				ATOM_VOLTAGE_FORMULA *formula =
+					&voltage_object->v1.asFormula;
+				if (formula->ucFlag & 1)
+					*voltage_step =
+						(le16_to_cpu(formula->usVoltageStep) + 1) / 2;
+				else
+					*voltage_step =
+						le16_to_cpu(formula->usVoltageStep);
+				return 0;
+			}
+			break;
+		case 2:
+			return -EINVAL;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+
+	}
+	return -EINVAL;
+}
+
+int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
+				      u8 voltage_type,
+				      u16 nominal_voltage,
+				      u16 *true_voltage)
+{
+	u16 min_voltage, max_voltage, voltage_step;
+
+	if (radeon_atom_get_max_voltage(rdev, voltage_type, &max_voltage))
+		return -EINVAL;
+	if (radeon_atom_get_min_voltage(rdev, voltage_type, &min_voltage))
+		return -EINVAL;
+	if (radeon_atom_get_voltage_step(rdev, voltage_type, &voltage_step))
+		return -EINVAL;
+
+	if (nominal_voltage <= min_voltage)
+		*true_voltage = min_voltage;
+	else if (nominal_voltage >= max_voltage)
+		*true_voltage = max_voltage;
+	else
+		*true_voltage = min_voltage +
+			((nominal_voltage - min_voltage) / voltage_step) *
+			voltage_step;
+
+	return 0;
+}
+
+int radeon_atom_get_voltage_table(struct radeon_device *rdev,
+				  u8 voltage_type, u8 voltage_mode,
+				  struct atom_voltage_table *voltage_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	int i, ret;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+		case 2:
+			switch (crev) {
+			case 1:
+				DRM_ERROR("old table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			case 2:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
+				if (voltage_object) {
+					ATOM_VOLTAGE_FORMULA_V2 *formula =
+						&voltage_object->v2.asFormula;
+					VOLTAGE_LUT_ENTRY *lut;
+					if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					lut = &formula->asVIDAdjustEntries[0];
+					for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(lut->usVoltageValue);
+						ret = radeon_atom_get_voltage_gpio_settings(rdev,
+								voltage_table->entries[i].value,
+								voltage_type,
+								&voltage_table->entries[i].smio_low,
+								&voltage_table->mask_low);
+						if (ret)
+							return ret;
+						lut = (VOLTAGE_LUT_ENTRY *)
+							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
+					}
+					voltage_table->count = formula->ucNumOfVoltageEntries;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					atom_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type, voltage_mode);
+				if (voltage_object) {
+					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
+						&voltage_object->v3.asGpioVoltageObj;
+					VOLTAGE_LUT_ENTRY_V2 *lut;
+					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					lut = &gpio->asVolGpioLut[0];
+					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(lut->usVoltageValue);
+						voltage_table->entries[i].smio_low =
+							le32_to_cpu(lut->ulVoltageId);
+						lut = (VOLTAGE_LUT_ENTRY_V2 *)
+							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
+					}
+					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
+					voltage_table->count = gpio->ucGpioEntryNum;
+					voltage_table->phase_delay = gpio->ucPhaseDelay;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
+union vram_info {
+	struct _ATOM_VRAM_INFO_V3 v1_3;
+	struct _ATOM_VRAM_INFO_V4 v1_4;
+	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
+};
+
+int radeon_atom_get_memory_info(struct radeon_device *rdev,
+				u8 module_index, struct atom_memory_info *mem_info)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, i;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+
+	memset(mem_info, 0, sizeof(struct atom_memory_info));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 3:
+				/* r6xx */
+				if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V3 *vram_module =
+						(ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V3 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
+					}
+					mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			case 4:
+				/* r7xx, evergreen */
+				if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V4 *vram_module =
+						(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V4 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
+					}
+					mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		case 2:
+			switch (crev) {
+			case 1:
+				/* ni */
+				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V7 *vram_module =
+						(ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V7 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
+					}
+					mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
+					mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
+				     bool gddr5, u8 module_index,
+				     struct atom_memory_clock_range_table *mclk_range_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, i;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+	u32 mem_timing_size = gddr5 ?
+		sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
+
+	memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 3:
+				DRM_ERROR("old table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			case 4:
+				/* r7xx, evergreen */
+				if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
+					ATOM_VRAM_MODULE_V4 *vram_module =
+						(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+					ATOM_MEMORY_TIMING_FORMAT *format;
+
+					for (i = 0; i < module_index; i++) {
+						if (le16_to_cpu(vram_module->usModuleSize) == 0)
+							return -EINVAL;
+						vram_module = (ATOM_VRAM_MODULE_V4 *)
+							((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
+					}
+					mclk_range_table->num_entries = (u8)
+						((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
+						mem_timing_size);
+					format = &vram_module->asMemTiming[0];
+					for (i = 0; i < mclk_range_table->num_entries; i++) {
+						mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
+						format = (ATOM_MEMORY_TIMING_FORMAT *)
+							((u8 *)format + mem_timing_size);
+					}
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		case 2:
+			DRM_ERROR("new table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+#define MEM_ID_MASK           0xff000000
+#define MEM_ID_SHIFT          24
+#define CLOCK_RANGE_MASK      0x00ffffff
+#define CLOCK_RANGE_SHIFT     0
+#define LOW_NIBBLE_MASK       0xf
+#define DATA_EQU_PREV         0
+#define DATA_FROM_TABLE       4
+
+int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
+				  u8 module_index,
+				  struct atom_mc_reg_table *reg_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
+	u32 i = 0, j;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+
+	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(rdev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			DRM_ERROR("old table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		case 2:
+			switch (crev) {
+			case 1:
+				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
+					ATOM_INIT_REG_BLOCK *reg_block =
+						(ATOM_INIT_REG_BLOCK *)
+						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
+					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
+						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
+						((u8 *)reg_block + (2 * sizeof(u16)) +
+						le16_to_cpu(reg_block->usRegIndexTblSize));
+					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
+					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
+							sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
+					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
+						return -EINVAL;
+					while (i < num_entries) {
+						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
+							break;
+						reg_table->mc_reg_address[i].s1 =
+							(u16)(le16_to_cpu(format->usRegIndex));
+						reg_table->mc_reg_address[i].pre_reg_data =
+							(u8)(format->ucPreRegDataLength);
+						i++;
+						format = (ATOM_INIT_REG_INDEX_FORMAT *)
+							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
+					}
+					reg_table->last = i;
+					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
+					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
+						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+								>> MEM_ID_SHIFT);
+						if (module_index == t_mem_id) {
+							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
+								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+								      >> CLOCK_RANGE_SHIFT);
+							for (i = 0, j = 1; i < reg_table->last; i++) {
+								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
+									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+										(u32)le32_to_cpu(*((u32 *)reg_data + j));
+									j++;
+								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+								}
+							}
+							num_ranges++;
+						}
+						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
+							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
+					}
+					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
+						return -EINVAL;
+					reg_table->num_entries = num_ranges;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
 void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
@@ -2707,6 +3946,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 	/* tell the bios not to handle mode switching */
 	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
+	/* clear the vbios dpms state */
+	if (ASIC_IS_DCE4(rdev))
+		bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
 	if (rdev->family >= CHIP_R600) {
 		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
 		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
@@ -3020,6 +4263,9 @@ radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	uint32_t bios_2_scratch;
 
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
 	if (rdev->family >= CHIP_R600)
 		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
 	else
