1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "drmP.h" 26 #include "radeon.h" 27 #include "radeon_asic.h" 28 #include "rv770d.h" 29 #include "r600_dpm.h" 30 #include "rv770_dpm.h" 31 #include "cypress_dpm.h" 32 #include "atom.h" 33 #include <linux/seq_file.h> 34 35 #define MC_CG_ARB_FREQ_F0 0x0a 36 #define MC_CG_ARB_FREQ_F1 0x0b 37 #define MC_CG_ARB_FREQ_F2 0x0c 38 #define MC_CG_ARB_FREQ_F3 0x0d 39 40 #define MC_CG_SEQ_DRAMCONF_S0 0x05 41 #define MC_CG_SEQ_DRAMCONF_S1 0x06 42 43 #define PCIE_BUS_CLK 10000 44 #define TCLK (PCIE_BUS_CLK / 10) 45 46 #define SMC_RAM_END 0xC000 47 48 struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps) 49 { 50 struct rv7xx_ps *ps = rps->ps_priv; 51 52 return ps; 53 } 54 55 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev) 56 { 57 struct rv7xx_power_info *pi = rdev->pm.dpm.priv; 58 59 return pi; 60 } 61 62 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev) 63 { 64 struct evergreen_power_info *pi = rdev->pm.dpm.priv; 65 66 return pi; 67 } 68 69 static void rv770_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev, 70 bool enable) 71 { 72 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 73 u32 tmp; 74 75 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 76 if (enable) { 77 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK; 78 tmp |= LC_HW_VOLTAGE_IF_CONTROL(1); 79 tmp |= LC_GEN2_EN_STRAP; 80 } else { 81 if (!pi->boot_in_gen2) { 82 tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK; 83 tmp &= ~LC_GEN2_EN_STRAP; 84 } 85 } 86 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) || 87 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) 88 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp); 89 90 } 91 92 static void rv770_enable_l0s(struct radeon_device *rdev) 93 { 94 u32 tmp; 95 96 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK; 97 tmp |= LC_L0S_INACTIVITY(3); 98 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp); 99 } 100 101 static void rv770_enable_l1(struct radeon_device *rdev) 102 { 103 u32 tmp; 104 105 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL); 106 tmp &= ~LC_L1_INACTIVITY_MASK; 107 tmp |= LC_L1_INACTIVITY(4); 108 tmp &= ~LC_PMI_TO_L1_DIS; 109 tmp &= ~LC_ASPM_TO_L1_DIS; 110 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp); 111 } 112 113 static void rv770_enable_pll_sleep_in_l1(struct radeon_device *rdev) 114 { 115 u32 tmp; 116 117 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK; 118 tmp |= LC_L1_INACTIVITY(8); 119 WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp); 120 121 /* NOTE, this is a PCIE indirect reg, not PCIE PORT */ 122 tmp = RREG32_PCIE(PCIE_P_CNTL); 123 tmp |= P_PLL_PWRDN_IN_L1L23; 124 tmp &= ~P_PLL_BUF_PDNB; 125 tmp &= ~P_PLL_PDNB; 126 tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF; 127 WREG32_PCIE(PCIE_P_CNTL, tmp); 128 } 129 130 static void rv770_gfx_clock_gating_enable(struct radeon_device *rdev, 131 bool enable) 132 { 133 if (enable) 134 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); 135 else { 136 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); 137 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON); 138 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); 139 RREG32(GB_TILING_CONFIG); 140 } 141 } 142 143 static void rv770_mg_clock_gating_enable(struct radeon_device *rdev, 144 bool enable) 145 { 146 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 147 148 if (enable) { 149 u32 mgcg_cgtt_local0; 150 151 if (rdev->family == CHIP_RV770) 152 mgcg_cgtt_local0 = RV770_MGCGTTLOCAL0_DFLT; 153 else 154 mgcg_cgtt_local0 = RV7XX_MGCGTTLOCAL0_DFLT; 155 156 WREG32(CG_CGTT_LOCAL_0, mgcg_cgtt_local0); 157 WREG32(CG_CGTT_LOCAL_1, (RV770_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF)); 158 159 if (pi->mgcgtssm) 160 WREG32(CGTS_SM_CTRL_REG, RV770_MGCGCGTSSMCTRL_DFLT); 161 } else { 162 WREG32(CG_CGTT_LOCAL_0, 0xFFFFFFFF); 163 WREG32(CG_CGTT_LOCAL_1, 0xFFFFCFFF); 164 } 165 } 166 167 void rv770_restore_cgcg(struct radeon_device *rdev) 168 { 169 bool dpm_en = false, cg_en = false; 170 171 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) 172 dpm_en = true; 173 if (RREG32(SCLK_PWRMGT_CNTL) & DYN_GFX_CLK_OFF_EN) 174 cg_en = true; 175 176 if (dpm_en && !cg_en) 177 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); 178 } 179 180 static void rv770_start_dpm(struct radeon_device *rdev) 181 { 182 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); 183 184 WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF); 185 186 WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); 187 } 188 189 void rv770_stop_dpm(struct radeon_device *rdev) 190 { 191 PPSMC_Result result; 192 193 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); 194 195 if (result != PPSMC_Result_OK) 196 DRM_ERROR("Could not force DPM to low.\n"); 197 198 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); 199 200 WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); 201 202 WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF); 203 } 204 205 bool rv770_dpm_enabled(struct radeon_device *rdev) 206 { 207 if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN) 208 return true; 209 else 210 return false; 211 } 212 213 void rv770_enable_thermal_protection(struct radeon_device *rdev, 214 bool enable) 215 { 216 if (enable) 217 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); 218 else 219 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); 220 } 221 222 void rv770_enable_acpi_pm(struct radeon_device *rdev) 223 { 224 WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); 225 } 226 227 u8 rv770_get_seq_value(struct radeon_device *rdev, 228 struct rv7xx_pl *pl) 229 { 230 return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ? 231 MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1; 232 } 233 234 #if 0 235 int rv770_read_smc_soft_register(struct radeon_device *rdev, 236 u16 reg_offset, u32 *value) 237 { 238 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 239 240 return rv770_read_smc_sram_dword(rdev, 241 pi->soft_regs_start + reg_offset, 242 value, pi->sram_end); 243 } 244 #endif 245 246 int rv770_write_smc_soft_register(struct radeon_device *rdev, 247 u16 reg_offset, u32 value) 248 { 249 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 250 251 return rv770_write_smc_sram_dword(rdev, 252 pi->soft_regs_start + reg_offset, 253 value, pi->sram_end); 254 } 255 256 int rv770_populate_smc_t(struct radeon_device *rdev, 257 struct radeon_ps *radeon_state, 258 RV770_SMC_SWSTATE *smc_state) 259 { 260 struct rv7xx_ps *state = rv770_get_ps(radeon_state); 261 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 262 int i; 263 int a_n; 264 int a_d; 265 u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; 266 u8 r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; 267 u32 a_t; 268 269 l[0] = 0; 270 r[2] = 100; 271 272 a_n = (int)state->medium.sclk * pi->lmp + 273 (int)state->low.sclk * (R600_AH_DFLT - pi->rlp); 274 a_d = (int)state->low.sclk * (100 - (int)pi->rlp) + 275 (int)state->medium.sclk * pi->lmp; 276 277 l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d); 278 r[0] = (u8)(pi->rlp + (100 - (int)pi->rlp) * a_n / a_d); 279 280 a_n = (int)state->high.sclk * pi->lhp + (int)state->medium.sclk * 281 (R600_AH_DFLT - pi->rmp); 282 a_d = (int)state->medium.sclk * (100 - (int)pi->rmp) + 283 (int)state->high.sclk * pi->lhp; 284 285 l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d); 286 r[1] = (u8)(pi->rmp + (100 - (int)pi->rmp) * a_n / a_d); 287 288 for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) { 289 a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp / 200); 290 smc_state->levels[i].aT = cpu_to_be32(a_t); 291 } 292 293 a_t = CG_R(r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200) | 294 CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200); 295 296 smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].aT = 297 cpu_to_be32(a_t); 298 299 return 0; 300 } 301 302 int rv770_populate_smc_sp(struct radeon_device *rdev, 303 struct radeon_ps *radeon_state, 304 RV770_SMC_SWSTATE *smc_state) 305 { 306 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 307 int i; 308 309 for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) 310 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp); 311 312 smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].bSP = 313 cpu_to_be32(pi->psp); 314 315 return 0; 316 } 317 318 static void rv770_calculate_fractional_mpll_feedback_divider(u32 memory_clock, 319 u32 reference_clock, 320 bool gddr5, 321 struct atom_clock_dividers *dividers, 322 u32 *clkf, 323 u32 *clkfrac) 324 { 325 u32 post_divider, reference_divider, feedback_divider8; 326 u32 fyclk; 327 328 if (gddr5) 329 fyclk = (memory_clock * 8) / 2; 330 else 331 fyclk = (memory_clock * 4) / 2; 332 333 post_divider = dividers->post_div; 334 reference_divider = dividers->ref_div; 335 336 feedback_divider8 = 337 (8 * fyclk * reference_divider * post_divider) / reference_clock; 338 339 *clkf = feedback_divider8 / 8; 340 *clkfrac = feedback_divider8 % 8; 341 } 342 343 static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv) 344 { 345 int ret = 0; 346 347 switch (postdiv) { 348 case 1: 349 *encoded_postdiv = 0; 350 break; 351 case 2: 352 *encoded_postdiv = 1; 353 break; 354 case 4: 355 *encoded_postdiv = 2; 356 break; 357 case 8: 358 *encoded_postdiv = 3; 359 break; 360 case 16: 361 *encoded_postdiv = 4; 362 break; 363 default: 364 ret = -EINVAL; 365 break; 366 } 367 368 return ret; 369 } 370 371 u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf) 372 { 373 if (clkf <= 0x10) 374 return 0x4B; 375 if (clkf <= 0x19) 376 return 0x5B; 377 if (clkf <= 0x21) 378 return 0x2B; 379 if (clkf <= 0x27) 380 return 0x6C; 381 if (clkf <= 0x31) 382 return 0x9D; 383 return 0xC6; 384 } 385 386 static int rv770_populate_mclk_value(struct radeon_device *rdev, 387 u32 engine_clock, u32 memory_clock, 388 RV7XX_SMC_MCLK_VALUE *mclk) 389 { 390 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 391 u8 encoded_reference_dividers[] = { 0, 16, 17, 20, 21 }; 392 u32 mpll_ad_func_cntl = 393 pi->clk_regs.rv770.mpll_ad_func_cntl; 394 u32 mpll_ad_func_cntl_2 = 395 pi->clk_regs.rv770.mpll_ad_func_cntl_2; 396 u32 mpll_dq_func_cntl = 397 pi->clk_regs.rv770.mpll_dq_func_cntl; 398 u32 mpll_dq_func_cntl_2 = 399 pi->clk_regs.rv770.mpll_dq_func_cntl_2; 400 u32 mclk_pwrmgt_cntl = 401 pi->clk_regs.rv770.mclk_pwrmgt_cntl; 402 u32 dll_cntl = pi->clk_regs.rv770.dll_cntl; 403 struct atom_clock_dividers dividers; 404 u32 reference_clock = rdev->clock.mpll.reference_freq; 405 u32 clkf, clkfrac; 406 u32 postdiv_yclk; 407 u32 ibias; 408 int ret; 409 410 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM, 411 memory_clock, false, ÷rs); 412 if (ret) 413 return ret; 414 415 if ((dividers.ref_div < 1) || (dividers.ref_div > 5)) 416 return -EINVAL; 417 418 rv770_calculate_fractional_mpll_feedback_divider(memory_clock, reference_clock, 419 pi->mem_gddr5, 420 ÷rs, &clkf, &clkfrac); 421 422 ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk); 423 if (ret) 424 return ret; 425 426 ibias = rv770_map_clkf_to_ibias(rdev, clkf); 427 428 mpll_ad_func_cntl &= ~(CLKR_MASK | 429 YCLK_POST_DIV_MASK | 430 CLKF_MASK | 431 CLKFRAC_MASK | 432 IBIAS_MASK); 433 mpll_ad_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]); 434 mpll_ad_func_cntl |= YCLK_POST_DIV(postdiv_yclk); 435 mpll_ad_func_cntl |= CLKF(clkf); 436 mpll_ad_func_cntl |= CLKFRAC(clkfrac); 437 mpll_ad_func_cntl |= IBIAS(ibias); 438 439 if (dividers.vco_mode) 440 mpll_ad_func_cntl_2 |= VCO_MODE; 441 else 442 mpll_ad_func_cntl_2 &= ~VCO_MODE; 443 444 if (pi->mem_gddr5) { 445 rv770_calculate_fractional_mpll_feedback_divider(memory_clock, 446 reference_clock, 447 pi->mem_gddr5, 448 ÷rs, &clkf, &clkfrac); 449 450 ibias = rv770_map_clkf_to_ibias(rdev, clkf); 451 452 ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk); 453 if (ret) 454 return ret; 455 456 mpll_dq_func_cntl &= ~(CLKR_MASK | 457 YCLK_POST_DIV_MASK | 458 CLKF_MASK | 459 CLKFRAC_MASK | 460 IBIAS_MASK); 461 mpll_dq_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]); 462 mpll_dq_func_cntl |= YCLK_POST_DIV(postdiv_yclk); 463 mpll_dq_func_cntl |= CLKF(clkf); 464 mpll_dq_func_cntl |= CLKFRAC(clkfrac); 465 mpll_dq_func_cntl |= IBIAS(ibias); 466 467 if (dividers.vco_mode) 468 mpll_dq_func_cntl_2 |= VCO_MODE; 469 else 470 mpll_dq_func_cntl_2 &= ~VCO_MODE; 471 } 472 473 mclk->mclk770.mclk_value = cpu_to_be32(memory_clock); 474 mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); 475 mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); 476 mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); 477 mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); 478 mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); 479 mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); 480 481 return 0; 482 } 483 484 static int rv770_populate_sclk_value(struct radeon_device *rdev, 485 u32 engine_clock, 486 RV770_SMC_SCLK_VALUE *sclk) 487 { 488 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 489 struct atom_clock_dividers dividers; 490 u32 spll_func_cntl = 491 pi->clk_regs.rv770.cg_spll_func_cntl; 492 u32 spll_func_cntl_2 = 493 pi->clk_regs.rv770.cg_spll_func_cntl_2; 494 u32 spll_func_cntl_3 = 495 pi->clk_regs.rv770.cg_spll_func_cntl_3; 496 u32 cg_spll_spread_spectrum = 497 pi->clk_regs.rv770.cg_spll_spread_spectrum; 498 u32 cg_spll_spread_spectrum_2 = 499 pi->clk_regs.rv770.cg_spll_spread_spectrum_2; 500 u64 tmp; 501 u32 reference_clock = rdev->clock.spll.reference_freq; 502 u32 reference_divider, post_divider; 503 u32 fbdiv; 504 int ret; 505 506 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 507 engine_clock, false, ÷rs); 508 if (ret) 509 return ret; 510 511 reference_divider = 1 + dividers.ref_div; 512 513 if (dividers.enable_post_div) 514 post_divider = (0x0f & (dividers.post_div >> 4)) + (0x0f & dividers.post_div) + 2; 515 else 516 post_divider = 1; 517 518 tmp = (u64) engine_clock * reference_divider * post_divider * 16384; 519 do_div(tmp, reference_clock); 520 fbdiv = (u32) tmp; 521 522 if (dividers.enable_post_div) 523 spll_func_cntl |= SPLL_DIVEN; 524 else 525 spll_func_cntl &= ~SPLL_DIVEN; 526 spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK); 527 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); 528 spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf); 529 spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf); 530 531 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 532 spll_func_cntl_2 |= SCLK_MUX_SEL(2); 533 534 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; 535 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); 536 spll_func_cntl_3 |= SPLL_DITHEN; 537 538 if (pi->sclk_ss) { 539 struct radeon_atom_ss ss; 540 u32 vco_freq = engine_clock * post_divider; 541 542 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 543 ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 544 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); 545 u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000); 546 547 cg_spll_spread_spectrum &= ~CLKS_MASK; 548 cg_spll_spread_spectrum |= CLKS(clk_s); 549 cg_spll_spread_spectrum |= SSEN; 550 551 cg_spll_spread_spectrum_2 &= ~CLKV_MASK; 552 cg_spll_spread_spectrum_2 |= CLKV(clk_v); 553 } 554 } 555 556 sclk->sclk_value = cpu_to_be32(engine_clock); 557 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); 558 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); 559 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); 560 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum); 561 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2); 562 563 return 0; 564 } 565 566 int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc, 567 RV770_SMC_VOLTAGE_VALUE *voltage) 568 { 569 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 570 int i; 571 572 if (!pi->voltage_control) { 573 voltage->index = 0; 574 voltage->value = 0; 575 return 0; 576 } 577 578 for (i = 0; i < pi->valid_vddc_entries; i++) { 579 if (vddc <= pi->vddc_table[i].vddc) { 580 voltage->index = pi->vddc_table[i].vddc_index; 581 voltage->value = cpu_to_be16(vddc); 582 break; 583 } 584 } 585 586 if (i == pi->valid_vddc_entries) 587 return -EINVAL; 588 589 return 0; 590 } 591 592 int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk, 593 RV770_SMC_VOLTAGE_VALUE *voltage) 594 { 595 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 596 597 if (!pi->mvdd_control) { 598 voltage->index = MVDD_HIGH_INDEX; 599 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); 600 return 0; 601 } 602 603 if (mclk <= pi->mvdd_split_frequency) { 604 voltage->index = MVDD_LOW_INDEX; 605 voltage->value = cpu_to_be16(MVDD_LOW_VALUE); 606 } else { 607 voltage->index = MVDD_HIGH_INDEX; 608 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); 609 } 610 611 return 0; 612 } 613 614 static int rv770_convert_power_level_to_smc(struct radeon_device *rdev, 615 struct rv7xx_pl *pl, 616 RV770_SMC_HW_PERFORMANCE_LEVEL *level, 617 u8 watermark_level) 618 { 619 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 620 int ret; 621 622 level->gen2PCIE = pi->pcie_gen2 ? 623 ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0; 624 level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0; 625 level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0; 626 level->displayWatermark = watermark_level; 627 628 if (rdev->family == CHIP_RV740) 629 ret = rv740_populate_sclk_value(rdev, pl->sclk, 630 &level->sclk); 631 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 632 ret = rv730_populate_sclk_value(rdev, pl->sclk, 633 &level->sclk); 634 else 635 ret = rv770_populate_sclk_value(rdev, pl->sclk, 636 &level->sclk); 637 if (ret) 638 return ret; 639 640 if (rdev->family == CHIP_RV740) { 641 if (pi->mem_gddr5) { 642 if (pl->mclk <= pi->mclk_strobe_mode_threshold) 643 level->strobeMode = 644 rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10; 645 else 646 level->strobeMode = 0; 647 648 if (pl->mclk > pi->mclk_edc_enable_threshold) 649 level->mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG; 650 else 651 level->mcFlags = 0; 652 } 653 ret = rv740_populate_mclk_value(rdev, pl->sclk, 654 pl->mclk, &level->mclk); 655 } else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 656 ret = rv730_populate_mclk_value(rdev, pl->sclk, 657 pl->mclk, &level->mclk); 658 else 659 ret = rv770_populate_mclk_value(rdev, pl->sclk, 660 pl->mclk, &level->mclk); 661 if (ret) 662 return ret; 663 664 ret = rv770_populate_vddc_value(rdev, pl->vddc, 665 &level->vddc); 666 if (ret) 667 return ret; 668 669 ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd); 670 671 return ret; 672 } 673 674 static int rv770_convert_power_state_to_smc(struct radeon_device *rdev, 675 struct radeon_ps *radeon_state, 676 RV770_SMC_SWSTATE *smc_state) 677 { 678 struct rv7xx_ps *state = rv770_get_ps(radeon_state); 679 int ret; 680 681 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC)) 682 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC; 683 684 ret = rv770_convert_power_level_to_smc(rdev, 685 &state->low, 686 &smc_state->levels[0], 687 PPSMC_DISPLAY_WATERMARK_LOW); 688 if (ret) 689 return ret; 690 691 ret = rv770_convert_power_level_to_smc(rdev, 692 &state->medium, 693 &smc_state->levels[1], 694 PPSMC_DISPLAY_WATERMARK_LOW); 695 if (ret) 696 return ret; 697 698 ret = rv770_convert_power_level_to_smc(rdev, 699 &state->high, 700 &smc_state->levels[2], 701 PPSMC_DISPLAY_WATERMARK_HIGH); 702 if (ret) 703 return ret; 704 705 smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1; 706 smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2; 707 smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3; 708 709 smc_state->levels[0].seqValue = rv770_get_seq_value(rdev, 710 &state->low); 711 smc_state->levels[1].seqValue = rv770_get_seq_value(rdev, 712 &state->medium); 713 smc_state->levels[2].seqValue = rv770_get_seq_value(rdev, 714 &state->high); 715 716 rv770_populate_smc_sp(rdev, radeon_state, smc_state); 717 718 return rv770_populate_smc_t(rdev, radeon_state, smc_state); 719 720 } 721 722 u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev, 723 u32 engine_clock) 724 { 725 u32 dram_rows; 726 u32 dram_refresh_rate; 727 u32 mc_arb_rfsh_rate; 728 u32 tmp; 729 730 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 731 dram_rows = 1 << (tmp + 10); 732 tmp = RREG32(MC_SEQ_MISC0) & 3; 733 dram_refresh_rate = 1 << (tmp + 3); 734 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; 735 736 return mc_arb_rfsh_rate; 737 } 738 739 static void rv770_program_memory_timing_parameters(struct radeon_device *rdev, 740 struct radeon_ps *radeon_state) 741 { 742 struct rv7xx_ps *state = rv770_get_ps(radeon_state); 743 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 744 u32 sqm_ratio; 745 u32 arb_refresh_rate; 746 u32 high_clock; 747 748 if (state->high.sclk < (state->low.sclk * 0xFF / 0x40)) 749 high_clock = state->high.sclk; 750 else 751 high_clock = (state->low.sclk * 0xFF / 0x40); 752 753 radeon_atom_set_engine_dram_timings(rdev, high_clock, 754 state->high.mclk); 755 756 sqm_ratio = 757 STATE0(64 * high_clock / pi->boot_sclk) | 758 STATE1(64 * high_clock / state->low.sclk) | 759 STATE2(64 * high_clock / state->medium.sclk) | 760 STATE3(64 * high_clock / state->high.sclk); 761 WREG32(MC_ARB_SQM_RATIO, sqm_ratio); 762 763 arb_refresh_rate = 764 POWERMODE0(rv770_calculate_memory_refresh_rate(rdev, pi->boot_sclk)) | 765 POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) | 766 POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) | 767 POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk)); 768 WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate); 769 } 770 771 void rv770_enable_backbias(struct radeon_device *rdev, 772 bool enable) 773 { 774 if (enable) 775 WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN, ~BACKBIAS_PAD_EN); 776 else 777 WREG32_P(GENERAL_PWRMGT, 0, ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN)); 778 } 779 780 static void rv770_enable_spread_spectrum(struct radeon_device *rdev, 781 bool enable) 782 { 783 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 784 785 if (enable) { 786 if (pi->sclk_ss) 787 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); 788 789 if (pi->mclk_ss) { 790 if (rdev->family == CHIP_RV740) 791 rv740_enable_mclk_spread_spectrum(rdev, true); 792 } 793 } else { 794 WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); 795 796 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); 797 798 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN); 799 800 if (rdev->family == CHIP_RV740) 801 rv740_enable_mclk_spread_spectrum(rdev, false); 802 } 803 } 804 805 static void rv770_program_mpll_timing_parameters(struct radeon_device *rdev) 806 { 807 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 808 809 if ((rdev->family == CHIP_RV770) && !pi->mem_gddr5) { 810 WREG32(MPLL_TIME, 811 (MPLL_LOCK_TIME(R600_MPLLLOCKTIME_DFLT * pi->ref_div) | 812 MPLL_RESET_TIME(R600_MPLLRESETTIME_DFLT))); 813 } 814 } 815 816 void rv770_setup_bsp(struct radeon_device *rdev) 817 { 818 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 819 u32 xclk = radeon_get_xclk(rdev); 820 821 r600_calculate_u_and_p(pi->asi, 822 xclk, 823 16, 824 &pi->bsp, 825 &pi->bsu); 826 827 r600_calculate_u_and_p(pi->pasi, 828 xclk, 829 16, 830 &pi->pbsp, 831 &pi->pbsu); 832 833 pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); 834 pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); 835 836 WREG32(CG_BSP, pi->dsp); 837 838 } 839 840 void rv770_program_git(struct radeon_device *rdev) 841 { 842 WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); 843 } 844 845 void rv770_program_tp(struct radeon_device *rdev) 846 { 847 int i; 848 enum r600_td td = R600_TD_DFLT; 849 850 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) 851 WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); 852 853 if (td == R600_TD_AUTO) 854 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); 855 else 856 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); 857 if (td == R600_TD_UP) 858 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); 859 if (td == R600_TD_DOWN) 860 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); 861 } 862 863 void rv770_program_tpp(struct radeon_device *rdev) 864 { 865 WREG32(CG_TPC, R600_TPC_DFLT); 866 } 867 868 void rv770_program_sstp(struct radeon_device *rdev) 869 { 870 WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); 871 } 872 873 void rv770_program_engine_speed_parameters(struct radeon_device *rdev) 874 { 875 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC); 876 } 877 878 static void rv770_enable_display_gap(struct radeon_device *rdev) 879 { 880 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); 881 882 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); 883 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) | 884 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); 885 WREG32(CG_DISPLAY_GAP_CNTL, tmp); 886 } 887 888 void rv770_program_vc(struct radeon_device *rdev) 889 { 890 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 891 892 WREG32(CG_FTV, pi->vrc); 893 } 894 895 void rv770_clear_vc(struct radeon_device *rdev) 896 { 897 WREG32(CG_FTV, 0); 898 } 899 900 int rv770_upload_firmware(struct radeon_device *rdev) 901 { 902 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 903 int ret; 904 905 rv770_reset_smc(rdev); 906 rv770_stop_smc_clock(rdev); 907 908 ret = rv770_load_smc_ucode(rdev, pi->sram_end); 909 if (ret) 910 return ret; 911 912 return 0; 913 } 914 915 static int rv770_populate_smc_acpi_state(struct radeon_device *rdev, 916 RV770_SMC_STATETABLE *table) 917 { 918 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 919 920 u32 mpll_ad_func_cntl = 921 pi->clk_regs.rv770.mpll_ad_func_cntl; 922 u32 mpll_ad_func_cntl_2 = 923 pi->clk_regs.rv770.mpll_ad_func_cntl_2; 924 u32 mpll_dq_func_cntl = 925 pi->clk_regs.rv770.mpll_dq_func_cntl; 926 u32 mpll_dq_func_cntl_2 = 927 pi->clk_regs.rv770.mpll_dq_func_cntl_2; 928 u32 spll_func_cntl = 929 pi->clk_regs.rv770.cg_spll_func_cntl; 930 u32 spll_func_cntl_2 = 931 pi->clk_regs.rv770.cg_spll_func_cntl_2; 932 u32 spll_func_cntl_3 = 933 pi->clk_regs.rv770.cg_spll_func_cntl_3; 934 u32 mclk_pwrmgt_cntl; 935 u32 dll_cntl; 936 937 table->ACPIState = table->initialState; 938 939 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; 940 941 if (pi->acpi_vddc) { 942 rv770_populate_vddc_value(rdev, pi->acpi_vddc, 943 &table->ACPIState.levels[0].vddc); 944 if (pi->pcie_gen2) { 945 if (pi->acpi_pcie_gen2) 946 table->ACPIState.levels[0].gen2PCIE = 1; 947 else 948 table->ACPIState.levels[0].gen2PCIE = 0; 949 } else 950 table->ACPIState.levels[0].gen2PCIE = 0; 951 if (pi->acpi_pcie_gen2) 952 table->ACPIState.levels[0].gen2XSP = 1; 953 else 954 table->ACPIState.levels[0].gen2XSP = 0; 955 } else { 956 rv770_populate_vddc_value(rdev, pi->min_vddc_in_table, 957 &table->ACPIState.levels[0].vddc); 958 table->ACPIState.levels[0].gen2PCIE = 0; 959 } 960 961 962 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN; 963 964 mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN; 965 966 mclk_pwrmgt_cntl = (MRDCKA0_RESET | 967 MRDCKA1_RESET | 968 MRDCKB0_RESET | 969 MRDCKB1_RESET | 970 MRDCKC0_RESET | 971 MRDCKC1_RESET | 972 MRDCKD0_RESET | 973 MRDCKD1_RESET); 974 975 dll_cntl = 0xff000000; 976 977 spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN; 978 979 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 980 spll_func_cntl_2 |= SCLK_MUX_SEL(4); 981 982 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); 983 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2); 984 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); 985 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2); 986 987 table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); 988 table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl); 989 990 table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0; 991 992 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl); 993 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2); 994 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3); 995 996 table->ACPIState.levels[0].sclk.sclk_value = 0; 997 998 rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd); 999 1000 table->ACPIState.levels[1] = table->ACPIState.levels[0]; 1001 table->ACPIState.levels[2] = table->ACPIState.levels[0]; 1002 1003 return 0; 1004 } 1005 1006 int rv770_populate_initial_mvdd_value(struct radeon_device *rdev, 1007 RV770_SMC_VOLTAGE_VALUE *voltage) 1008 { 1009 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1010 1011 if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) == 1012 (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) { 1013 voltage->index = MVDD_LOW_INDEX; 1014 voltage->value = cpu_to_be16(MVDD_LOW_VALUE); 1015 } else { 1016 voltage->index = MVDD_HIGH_INDEX; 1017 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); 1018 } 1019 1020 return 0; 1021 } 1022 1023 static int rv770_populate_smc_initial_state(struct radeon_device *rdev, 1024 struct radeon_ps *radeon_state, 1025 RV770_SMC_STATETABLE *table) 1026 { 1027 struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state); 1028 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1029 u32 a_t; 1030 1031 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = 1032 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl); 1033 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = 1034 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2); 1035 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = 1036 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl); 1037 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = 1038 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2); 1039 table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = 1040 cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl); 1041 table->initialState.levels[0].mclk.mclk770.vDLL_CNTL = 1042 cpu_to_be32(pi->clk_regs.rv770.dll_cntl); 1043 1044 table->initialState.levels[0].mclk.mclk770.vMPLL_SS = 1045 cpu_to_be32(pi->clk_regs.rv770.mpll_ss1); 1046 table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 = 1047 cpu_to_be32(pi->clk_regs.rv770.mpll_ss2); 1048 1049 table->initialState.levels[0].mclk.mclk770.mclk_value = 1050 cpu_to_be32(initial_state->low.mclk); 1051 1052 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = 1053 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl); 1054 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = 1055 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2); 1056 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = 1057 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3); 1058 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = 1059 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum); 1060 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = 1061 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2); 1062 1063 table->initialState.levels[0].sclk.sclk_value = 1064 cpu_to_be32(initial_state->low.sclk); 1065 1066 table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0; 1067 1068 table->initialState.levels[0].seqValue = 1069 rv770_get_seq_value(rdev, &initial_state->low); 1070 1071 rv770_populate_vddc_value(rdev, 1072 initial_state->low.vddc, 1073 &table->initialState.levels[0].vddc); 1074 rv770_populate_initial_mvdd_value(rdev, 1075 &table->initialState.levels[0].mvdd); 1076 1077 a_t = CG_R(0xffff) | CG_L(0); 1078 table->initialState.levels[0].aT = cpu_to_be32(a_t); 1079 1080 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); 1081 1082 if (pi->boot_in_gen2) 1083 table->initialState.levels[0].gen2PCIE = 1; 1084 else 1085 table->initialState.levels[0].gen2PCIE = 0; 1086 if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) 1087 table->initialState.levels[0].gen2XSP = 1; 1088 else 1089 table->initialState.levels[0].gen2XSP = 0; 1090 1091 if (rdev->family == CHIP_RV740) { 1092 if (pi->mem_gddr5) { 1093 if (initial_state->low.mclk <= pi->mclk_strobe_mode_threshold) 1094 table->initialState.levels[0].strobeMode = 1095 rv740_get_mclk_frequency_ratio(initial_state->low.mclk) | 0x10; 1096 else 1097 table->initialState.levels[0].strobeMode = 0; 1098 1099 if (initial_state->low.mclk >= pi->mclk_edc_enable_threshold) 1100 table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG; 1101 else 1102 table->initialState.levels[0].mcFlags = 0; 1103 } 1104 } 1105 1106 table->initialState.levels[1] = table->initialState.levels[0]; 1107 table->initialState.levels[2] = table->initialState.levels[0]; 1108 1109 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; 1110 1111 return 0; 1112 } 1113 1114 static int rv770_populate_smc_vddc_table(struct radeon_device *rdev, 1115 RV770_SMC_STATETABLE *table) 1116 { 1117 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1118 int i; 1119 1120 for (i = 0; i < pi->valid_vddc_entries; i++) { 1121 table->highSMIO[pi->vddc_table[i].vddc_index] = 1122 pi->vddc_table[i].high_smio; 1123 table->lowSMIO[pi->vddc_table[i].vddc_index] = 1124 cpu_to_be32(pi->vddc_table[i].low_smio); 1125 } 1126 1127 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0; 1128 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] = 1129 cpu_to_be32(pi->vddc_mask_low); 1130 1131 for (i = 0; 1132 ((i < pi->valid_vddc_entries) && 1133 (pi->max_vddc_in_table > 1134 pi->vddc_table[i].vddc)); 1135 i++); 1136 1137 table->maxVDDCIndexInPPTable = 1138 pi->vddc_table[i].vddc_index; 1139 1140 return 0; 1141 } 1142 1143 static int rv770_populate_smc_mvdd_table(struct radeon_device *rdev, 1144 RV770_SMC_STATETABLE *table) 1145 { 1146 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1147 1148 if (pi->mvdd_control) { 1149 table->lowSMIO[MVDD_HIGH_INDEX] |= 1150 cpu_to_be32(pi->mvdd_low_smio[MVDD_HIGH_INDEX]); 1151 table->lowSMIO[MVDD_LOW_INDEX] |= 1152 cpu_to_be32(pi->mvdd_low_smio[MVDD_LOW_INDEX]); 1153 1154 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0; 1155 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] = 1156 cpu_to_be32(pi->mvdd_mask_low); 1157 } 1158 1159 return 0; 1160 } 1161 1162 static int rv770_init_smc_table(struct radeon_device *rdev, 1163 struct radeon_ps *radeon_boot_state) 1164 { 1165 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1166 struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state); 1167 RV770_SMC_STATETABLE *table = &pi->smc_statetable; 1168 int ret; 1169 1170 memset(table, 0, sizeof(RV770_SMC_STATETABLE)); 1171 1172 pi->boot_sclk = boot_state->low.sclk; 1173 1174 rv770_populate_smc_vddc_table(rdev, table); 1175 rv770_populate_smc_mvdd_table(rdev, table); 1176 1177 switch (rdev->pm.int_thermal_type) { 1178 case THERMAL_TYPE_RV770: 1179 case THERMAL_TYPE_ADT7473_WITH_INTERNAL: 1180 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; 1181 break; 1182 case THERMAL_TYPE_NONE: 1183 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; 1184 break; 1185 case THERMAL_TYPE_EXTERNAL_GPIO: 1186 default: 1187 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; 1188 break; 1189 } 1190 1191 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) { 1192 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 1193 1194 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT) 1195 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK; 1196 1197 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT) 1198 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE; 1199 } 1200 1201 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 1202 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 1203 1204 if (pi->mem_gddr5) 1205 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 1206 1207 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1208 ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table); 1209 else 1210 ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table); 1211 if (ret) 1212 return ret; 1213 1214 if (rdev->family == CHIP_RV740) 1215 ret = rv740_populate_smc_acpi_state(rdev, table); 1216 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1217 ret = rv730_populate_smc_acpi_state(rdev, table); 1218 else 1219 ret = rv770_populate_smc_acpi_state(rdev, table); 1220 if (ret) 1221 return ret; 1222 1223 table->driverState = table->initialState; 1224 1225 return rv770_copy_bytes_to_smc(rdev, 1226 pi->state_table_start, 1227 (const u8 *)table, 1228 sizeof(RV770_SMC_STATETABLE), 1229 pi->sram_end); 1230 } 1231 1232 static int rv770_construct_vddc_table(struct radeon_device *rdev) 1233 { 1234 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1235 u16 min, max, step; 1236 u32 steps = 0; 1237 u8 vddc_index = 0; 1238 u32 i; 1239 1240 radeon_atom_get_min_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &min); 1241 radeon_atom_get_max_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &max); 1242 radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &step); 1243 1244 steps = (max - min) / step + 1; 1245 1246 if (steps > MAX_NO_VREG_STEPS) 1247 return -EINVAL; 1248 1249 for (i = 0; i < steps; i++) { 1250 u32 gpio_pins, gpio_mask; 1251 1252 pi->vddc_table[i].vddc = (u16)(min + i * step); 1253 radeon_atom_get_voltage_gpio_settings(rdev, 1254 pi->vddc_table[i].vddc, 1255 SET_VOLTAGE_TYPE_ASIC_VDDC, 1256 &gpio_pins, &gpio_mask); 1257 pi->vddc_table[i].low_smio = gpio_pins & gpio_mask; 1258 pi->vddc_table[i].high_smio = 0; 1259 pi->vddc_mask_low = gpio_mask; 1260 if (i > 0) { 1261 if ((pi->vddc_table[i].low_smio != 1262 pi->vddc_table[i - 1].low_smio ) || 1263 (pi->vddc_table[i].high_smio != 1264 pi->vddc_table[i - 1].high_smio)) 1265 vddc_index++; 1266 } 1267 pi->vddc_table[i].vddc_index = vddc_index; 1268 } 1269 1270 pi->valid_vddc_entries = (u8)steps; 1271 1272 return 0; 1273 } 1274 1275 static u32 rv770_get_mclk_split_point(struct atom_memory_info *memory_info) 1276 { 1277 if (memory_info->mem_type == MEM_TYPE_GDDR3) 1278 return 30000; 1279 1280 return 0; 1281 } 1282 1283 static int rv770_get_mvdd_pin_configuration(struct radeon_device *rdev) 1284 { 1285 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1286 u32 gpio_pins, gpio_mask; 1287 1288 radeon_atom_get_voltage_gpio_settings(rdev, 1289 MVDD_HIGH_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC, 1290 &gpio_pins, &gpio_mask); 1291 pi->mvdd_mask_low = gpio_mask; 1292 pi->mvdd_low_smio[MVDD_HIGH_INDEX] = 1293 gpio_pins & gpio_mask; 1294 1295 radeon_atom_get_voltage_gpio_settings(rdev, 1296 MVDD_LOW_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC, 1297 &gpio_pins, &gpio_mask); 1298 pi->mvdd_low_smio[MVDD_LOW_INDEX] = 1299 gpio_pins & gpio_mask; 1300 1301 return 0; 1302 } 1303 1304 u8 rv770_get_memory_module_index(struct radeon_device *rdev) 1305 { 1306 return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); 1307 } 1308 1309 static int rv770_get_mvdd_configuration(struct radeon_device *rdev) 1310 { 1311 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1312 u8 memory_module_index; 1313 struct atom_memory_info memory_info; 1314 1315 memory_module_index = rv770_get_memory_module_index(rdev); 1316 1317 if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) { 1318 pi->mvdd_control = false; 1319 return 0; 1320 } 1321 1322 pi->mvdd_split_frequency = 1323 rv770_get_mclk_split_point(&memory_info); 1324 1325 if (pi->mvdd_split_frequency == 0) { 1326 pi->mvdd_control = false; 1327 return 0; 1328 } 1329 1330 return rv770_get_mvdd_pin_configuration(rdev); 1331 } 1332 1333 void rv770_enable_voltage_control(struct radeon_device *rdev, 1334 bool enable) 1335 { 1336 if (enable) 1337 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); 1338 else 1339 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); 1340 } 1341 1342 static void rv770_program_display_gap(struct radeon_device *rdev) 1343 { 1344 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); 1345 1346 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); 1347 if (rdev->pm.dpm.new_active_crtcs & 1) { 1348 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); 1349 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1350 } else if (rdev->pm.dpm.new_active_crtcs & 2) { 1351 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1352 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); 1353 } else { 1354 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1355 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); 1356 } 1357 WREG32(CG_DISPLAY_GAP_CNTL, tmp); 1358 } 1359 1360 static void rv770_enable_dynamic_pcie_gen2(struct radeon_device *rdev, 1361 bool enable) 1362 { 1363 rv770_enable_bif_dynamic_pcie_gen2(rdev, enable); 1364 1365 if (enable) 1366 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); 1367 else 1368 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); 1369 } 1370 1371 static void r7xx_program_memory_timing_parameters(struct radeon_device *rdev, 1372 struct radeon_ps *radeon_new_state) 1373 { 1374 if ((rdev->family == CHIP_RV730) || 1375 (rdev->family == CHIP_RV710) || 1376 (rdev->family == CHIP_RV740)) 1377 rv730_program_memory_timing_parameters(rdev, radeon_new_state); 1378 else 1379 rv770_program_memory_timing_parameters(rdev, radeon_new_state); 1380 } 1381 1382 static int rv770_upload_sw_state(struct radeon_device *rdev, 1383 struct radeon_ps *radeon_new_state) 1384 { 1385 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1386 u16 address = pi->state_table_start + 1387 offsetof(RV770_SMC_STATETABLE, driverState); 1388 RV770_SMC_SWSTATE state = { 0 }; 1389 int ret; 1390 1391 ret = rv770_convert_power_state_to_smc(rdev, radeon_new_state, &state); 1392 if (ret) 1393 return ret; 1394 1395 return rv770_copy_bytes_to_smc(rdev, address, (const u8 *)&state, 1396 sizeof(RV770_SMC_SWSTATE), 1397 pi->sram_end); 1398 } 1399 1400 int rv770_halt_smc(struct radeon_device *rdev) 1401 { 1402 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK) 1403 return -EINVAL; 1404 1405 if (rv770_wait_for_smc_inactive(rdev) != PPSMC_Result_OK) 1406 return -EINVAL; 1407 1408 return 0; 1409 } 1410 1411 int rv770_resume_smc(struct radeon_device *rdev) 1412 { 1413 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Resume) != PPSMC_Result_OK) 1414 return -EINVAL; 1415 return 0; 1416 } 1417 1418 int rv770_set_sw_state(struct radeon_device *rdev) 1419 { 1420 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) 1421 return -EINVAL; 1422 return 0; 1423 } 1424 1425 int rv770_set_boot_state(struct radeon_device *rdev) 1426 { 1427 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) != PPSMC_Result_OK) 1428 return -EINVAL; 1429 return 0; 1430 } 1431 1432 void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, 1433 struct radeon_ps *new_ps, 1434 struct radeon_ps *old_ps) 1435 { 1436 struct rv7xx_ps *new_state = rv770_get_ps(new_ps); 1437 struct rv7xx_ps *current_state = rv770_get_ps(old_ps); 1438 1439 if ((new_ps->vclk == old_ps->vclk) && 1440 (new_ps->dclk == old_ps->dclk)) 1441 return; 1442 1443 if (new_state->high.sclk >= current_state->high.sclk) 1444 return; 1445 1446 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk); 1447 } 1448 1449 void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, 1450 struct radeon_ps *new_ps, 1451 struct radeon_ps *old_ps) 1452 { 1453 struct rv7xx_ps *new_state = rv770_get_ps(new_ps); 1454 struct rv7xx_ps *current_state = rv770_get_ps(old_ps); 1455 1456 if ((new_ps->vclk == old_ps->vclk) && 1457 (new_ps->dclk == old_ps->dclk)) 1458 return; 1459 1460 if (new_state->high.sclk < current_state->high.sclk) 1461 return; 1462 1463 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk); 1464 } 1465 1466 int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev) 1467 { 1468 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK) 1469 return -EINVAL; 1470 1471 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled)) != PPSMC_Result_OK) 1472 return -EINVAL; 1473 1474 return 0; 1475 } 1476 1477 int rv770_dpm_force_performance_level(struct radeon_device *rdev, 1478 enum radeon_dpm_forced_level level) 1479 { 1480 PPSMC_Msg msg; 1481 1482 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 1483 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ZeroLevelsDisabled) != PPSMC_Result_OK) 1484 return -EINVAL; 1485 msg = PPSMC_MSG_ForceHigh; 1486 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 1487 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) 1488 return -EINVAL; 1489 msg = (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled); 1490 } else { 1491 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) 1492 return -EINVAL; 1493 msg = (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled); 1494 } 1495 1496 if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK) 1497 return -EINVAL; 1498 1499 rdev->pm.dpm.forced_level = level; 1500 1501 return 0; 1502 } 1503 1504 void r7xx_start_smc(struct radeon_device *rdev) 1505 { 1506 rv770_start_smc(rdev); 1507 rv770_start_smc_clock(rdev); 1508 } 1509 1510 1511 void r7xx_stop_smc(struct radeon_device *rdev) 1512 { 1513 rv770_reset_smc(rdev); 1514 rv770_stop_smc_clock(rdev); 1515 } 1516 1517 static void rv770_read_clock_registers(struct radeon_device *rdev) 1518 { 1519 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1520 1521 pi->clk_regs.rv770.cg_spll_func_cntl = 1522 RREG32(CG_SPLL_FUNC_CNTL); 1523 pi->clk_regs.rv770.cg_spll_func_cntl_2 = 1524 RREG32(CG_SPLL_FUNC_CNTL_2); 1525 pi->clk_regs.rv770.cg_spll_func_cntl_3 = 1526 RREG32(CG_SPLL_FUNC_CNTL_3); 1527 pi->clk_regs.rv770.cg_spll_spread_spectrum = 1528 RREG32(CG_SPLL_SPREAD_SPECTRUM); 1529 pi->clk_regs.rv770.cg_spll_spread_spectrum_2 = 1530 RREG32(CG_SPLL_SPREAD_SPECTRUM_2); 1531 pi->clk_regs.rv770.mpll_ad_func_cntl = 1532 RREG32(MPLL_AD_FUNC_CNTL); 1533 pi->clk_regs.rv770.mpll_ad_func_cntl_2 = 1534 RREG32(MPLL_AD_FUNC_CNTL_2); 1535 pi->clk_regs.rv770.mpll_dq_func_cntl = 1536 RREG32(MPLL_DQ_FUNC_CNTL); 1537 pi->clk_regs.rv770.mpll_dq_func_cntl_2 = 1538 RREG32(MPLL_DQ_FUNC_CNTL_2); 1539 pi->clk_regs.rv770.mclk_pwrmgt_cntl = 1540 RREG32(MCLK_PWRMGT_CNTL); 1541 pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL); 1542 } 1543 1544 static void r7xx_read_clock_registers(struct radeon_device *rdev) 1545 { 1546 if (rdev->family == CHIP_RV740) 1547 rv740_read_clock_registers(rdev); 1548 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1549 rv730_read_clock_registers(rdev); 1550 else 1551 rv770_read_clock_registers(rdev); 1552 } 1553 1554 void rv770_read_voltage_smio_registers(struct radeon_device *rdev) 1555 { 1556 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1557 1558 pi->s0_vid_lower_smio_cntl = 1559 RREG32(S0_VID_LOWER_SMIO_CNTL); 1560 } 1561 1562 void rv770_reset_smio_status(struct radeon_device *rdev) 1563 { 1564 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1565 u32 sw_smio_index, vid_smio_cntl; 1566 1567 sw_smio_index = 1568 (RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT; 1569 switch (sw_smio_index) { 1570 case 3: 1571 vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL); 1572 break; 1573 case 2: 1574 vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL); 1575 break; 1576 case 1: 1577 vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL); 1578 break; 1579 case 0: 1580 return; 1581 default: 1582 vid_smio_cntl = pi->s0_vid_lower_smio_cntl; 1583 break; 1584 } 1585 1586 WREG32(S0_VID_LOWER_SMIO_CNTL, vid_smio_cntl); 1587 WREG32_P(GENERAL_PWRMGT, SW_SMIO_INDEX(0), ~SW_SMIO_INDEX_MASK); 1588 } 1589 1590 void rv770_get_memory_type(struct radeon_device *rdev) 1591 { 1592 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1593 u32 tmp; 1594 1595 tmp = RREG32(MC_SEQ_MISC0); 1596 1597 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) == 1598 MC_SEQ_MISC0_GDDR5_VALUE) 1599 pi->mem_gddr5 = true; 1600 else 1601 pi->mem_gddr5 = false; 1602 1603 } 1604 1605 void rv770_get_pcie_gen2_status(struct radeon_device *rdev) 1606 { 1607 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1608 u32 tmp; 1609 1610 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 1611 1612 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) && 1613 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) 1614 pi->pcie_gen2 = true; 1615 else 1616 pi->pcie_gen2 = false; 1617 1618 if (pi->pcie_gen2) { 1619 if (tmp & LC_CURRENT_DATA_RATE) 1620 pi->boot_in_gen2 = true; 1621 else 1622 pi->boot_in_gen2 = false; 1623 } else 1624 pi->boot_in_gen2 = false; 1625 } 1626 1627 #if 0 1628 static int rv770_enter_ulp_state(struct radeon_device *rdev) 1629 { 1630 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1631 1632 if (pi->gfx_clock_gating) { 1633 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); 1634 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON); 1635 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); 1636 RREG32(GB_TILING_CONFIG); 1637 } 1638 1639 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower), 1640 ~HOST_SMC_MSG_MASK); 1641 1642 udelay(7000); 1643 1644 return 0; 1645 } 1646 1647 static int rv770_exit_ulp_state(struct radeon_device *rdev) 1648 { 1649 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1650 int i; 1651 1652 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_ResumeFromMinimumPower), 1653 ~HOST_SMC_MSG_MASK); 1654 1655 udelay(7000); 1656 1657 for (i = 0; i < rdev->usec_timeout; i++) { 1658 if (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT) == 1) 1659 break; 1660 udelay(1000); 1661 } 1662 1663 if (pi->gfx_clock_gating) 1664 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN); 1665 1666 return 0; 1667 } 1668 #endif 1669 1670 static void rv770_get_mclk_odt_threshold(struct radeon_device *rdev) 1671 { 1672 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1673 u8 memory_module_index; 1674 struct atom_memory_info memory_info; 1675 1676 pi->mclk_odt_threshold = 0; 1677 1678 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) { 1679 memory_module_index = rv770_get_memory_module_index(rdev); 1680 1681 if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) 1682 return; 1683 1684 if (memory_info.mem_type == MEM_TYPE_DDR2 || 1685 memory_info.mem_type == MEM_TYPE_DDR3) 1686 pi->mclk_odt_threshold = 30000; 1687 } 1688 } 1689 1690 void rv770_get_max_vddc(struct radeon_device *rdev) 1691 { 1692 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1693 u16 vddc; 1694 1695 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc)) 1696 pi->max_vddc = 0; 1697 else 1698 pi->max_vddc = vddc; 1699 } 1700 1701 void rv770_program_response_times(struct radeon_device *rdev) 1702 { 1703 u32 voltage_response_time, backbias_response_time; 1704 u32 acpi_delay_time, vbi_time_out; 1705 u32 vddc_dly, bb_dly, acpi_dly, vbi_dly; 1706 u32 reference_clock; 1707 1708 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time; 1709 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time; 1710 1711 if (voltage_response_time == 0) 1712 voltage_response_time = 1000; 1713 1714 if (backbias_response_time == 0) 1715 backbias_response_time = 1000; 1716 1717 acpi_delay_time = 15000; 1718 vbi_time_out = 100000; 1719 1720 reference_clock = radeon_get_xclk(rdev); 1721 1722 vddc_dly = (voltage_response_time * reference_clock) / 1600; 1723 bb_dly = (backbias_response_time * reference_clock) / 1600; 1724 acpi_dly = (acpi_delay_time * reference_clock) / 1600; 1725 vbi_dly = (vbi_time_out * reference_clock) / 1600; 1726 1727 rv770_write_smc_soft_register(rdev, 1728 RV770_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); 1729 rv770_write_smc_soft_register(rdev, 1730 RV770_SMC_SOFT_REGISTER_delay_bbias, bb_dly); 1731 rv770_write_smc_soft_register(rdev, 1732 RV770_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); 1733 rv770_write_smc_soft_register(rdev, 1734 RV770_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); 1735 #if 0 1736 /* XXX look up hw revision */ 1737 if (WEKIVA_A21) 1738 rv770_write_smc_soft_register(rdev, 1739 RV770_SMC_SOFT_REGISTER_baby_step_timer, 1740 0x10); 1741 #endif 1742 } 1743 1744 static void rv770_program_dcodt_before_state_switch(struct radeon_device *rdev, 1745 struct radeon_ps *radeon_new_state, 1746 struct radeon_ps *radeon_current_state) 1747 { 1748 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1749 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state); 1750 struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state); 1751 bool current_use_dc = false; 1752 bool new_use_dc = false; 1753 1754 if (pi->mclk_odt_threshold == 0) 1755 return; 1756 1757 if (current_state->high.mclk <= pi->mclk_odt_threshold) 1758 current_use_dc = true; 1759 1760 if (new_state->high.mclk <= pi->mclk_odt_threshold) 1761 new_use_dc = true; 1762 1763 if (current_use_dc == new_use_dc) 1764 return; 1765 1766 if (!current_use_dc && new_use_dc) 1767 return; 1768 1769 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1770 rv730_program_dcodt(rdev, new_use_dc); 1771 } 1772 1773 static void rv770_program_dcodt_after_state_switch(struct radeon_device *rdev, 1774 struct radeon_ps *radeon_new_state, 1775 struct radeon_ps *radeon_current_state) 1776 { 1777 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1778 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state); 1779 struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state); 1780 bool current_use_dc = false; 1781 bool new_use_dc = false; 1782 1783 if (pi->mclk_odt_threshold == 0) 1784 return; 1785 1786 if (current_state->high.mclk <= pi->mclk_odt_threshold) 1787 current_use_dc = true; 1788 1789 if (new_state->high.mclk <= pi->mclk_odt_threshold) 1790 new_use_dc = true; 1791 1792 if (current_use_dc == new_use_dc) 1793 return; 1794 1795 if (current_use_dc && !new_use_dc) 1796 return; 1797 1798 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1799 rv730_program_dcodt(rdev, new_use_dc); 1800 } 1801 1802 static void rv770_retrieve_odt_values(struct radeon_device *rdev) 1803 { 1804 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1805 1806 if (pi->mclk_odt_threshold == 0) 1807 return; 1808 1809 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1810 rv730_get_odt_values(rdev); 1811 } 1812 1813 static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources) 1814 { 1815 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1816 bool want_thermal_protection; 1817 enum radeon_dpm_event_src dpm_event_src; 1818 1819 switch (sources) { 1820 case 0: 1821 default: 1822 want_thermal_protection = false; 1823 break; 1824 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): 1825 want_thermal_protection = true; 1826 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; 1827 break; 1828 1829 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): 1830 want_thermal_protection = true; 1831 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; 1832 break; 1833 1834 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | 1835 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): 1836 want_thermal_protection = true; 1837 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; 1838 break; 1839 } 1840 1841 if (want_thermal_protection) { 1842 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); 1843 if (pi->thermal_protection) 1844 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); 1845 } else { 1846 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); 1847 } 1848 } 1849 1850 void rv770_enable_auto_throttle_source(struct radeon_device *rdev, 1851 enum radeon_dpm_auto_throttle_src source, 1852 bool enable) 1853 { 1854 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1855 1856 if (enable) { 1857 if (!(pi->active_auto_throttle_sources & (1 << source))) { 1858 pi->active_auto_throttle_sources |= 1 << source; 1859 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1860 } 1861 } else { 1862 if (pi->active_auto_throttle_sources & (1 << source)) { 1863 pi->active_auto_throttle_sources &= ~(1 << source); 1864 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1865 } 1866 } 1867 } 1868 1869 static int rv770_set_thermal_temperature_range(struct radeon_device *rdev, 1870 int min_temp, int max_temp) 1871 { 1872 int low_temp = 0 * 1000; 1873 int high_temp = 255 * 1000; 1874 1875 if (low_temp < min_temp) 1876 low_temp = min_temp; 1877 if (high_temp > max_temp) 1878 high_temp = max_temp; 1879 if (high_temp < low_temp) { 1880 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 1881 return -EINVAL; 1882 } 1883 1884 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); 1885 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); 1886 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); 1887 1888 rdev->pm.dpm.thermal.min_temp = low_temp; 1889 rdev->pm.dpm.thermal.max_temp = high_temp; 1890 1891 return 0; 1892 } 1893 1894 int rv770_dpm_enable(struct radeon_device *rdev) 1895 { 1896 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 1897 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 1898 int ret; 1899 1900 if (pi->gfx_clock_gating) 1901 rv770_restore_cgcg(rdev); 1902 1903 if (rv770_dpm_enabled(rdev)) 1904 return -EINVAL; 1905 1906 if (pi->voltage_control) { 1907 rv770_enable_voltage_control(rdev, true); 1908 ret = rv770_construct_vddc_table(rdev); 1909 if (ret) { 1910 DRM_ERROR("rv770_construct_vddc_table failed\n"); 1911 return ret; 1912 } 1913 } 1914 1915 if (pi->dcodt) 1916 rv770_retrieve_odt_values(rdev); 1917 1918 if (pi->mvdd_control) { 1919 ret = rv770_get_mvdd_configuration(rdev); 1920 if (ret) { 1921 DRM_ERROR("rv770_get_mvdd_configuration failed\n"); 1922 return ret; 1923 } 1924 } 1925 1926 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS) 1927 rv770_enable_backbias(rdev, true); 1928 1929 rv770_enable_spread_spectrum(rdev, true); 1930 1931 if (pi->thermal_protection) 1932 rv770_enable_thermal_protection(rdev, true); 1933 1934 rv770_program_mpll_timing_parameters(rdev); 1935 rv770_setup_bsp(rdev); 1936 rv770_program_git(rdev); 1937 rv770_program_tp(rdev); 1938 rv770_program_tpp(rdev); 1939 rv770_program_sstp(rdev); 1940 rv770_program_engine_speed_parameters(rdev); 1941 rv770_enable_display_gap(rdev); 1942 rv770_program_vc(rdev); 1943 1944 if (pi->dynamic_pcie_gen2) 1945 rv770_enable_dynamic_pcie_gen2(rdev, true); 1946 1947 ret = rv770_upload_firmware(rdev); 1948 if (ret) { 1949 DRM_ERROR("rv770_upload_firmware failed\n"); 1950 return ret; 1951 } 1952 ret = rv770_init_smc_table(rdev, boot_ps); 1953 if (ret) { 1954 DRM_ERROR("rv770_init_smc_table failed\n"); 1955 return ret; 1956 } 1957 1958 rv770_program_response_times(rdev); 1959 r7xx_start_smc(rdev); 1960 1961 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 1962 rv730_start_dpm(rdev); 1963 else 1964 rv770_start_dpm(rdev); 1965 1966 if (pi->gfx_clock_gating) 1967 rv770_gfx_clock_gating_enable(rdev, true); 1968 1969 if (pi->mg_clock_gating) 1970 rv770_mg_clock_gating_enable(rdev, true); 1971 1972 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 1973 1974 return 0; 1975 } 1976 1977 int rv770_dpm_late_enable(struct radeon_device *rdev) 1978 { 1979 int ret; 1980 1981 if (rdev->irq.installed && 1982 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 1983 PPSMC_Result result; 1984 1985 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 1986 if (ret) 1987 return ret; 1988 rdev->irq.dpm_thermal = true; 1989 radeon_irq_set(rdev); 1990 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); 1991 1992 if (result != PPSMC_Result_OK) 1993 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 1994 } 1995 1996 return 0; 1997 } 1998 1999 void rv770_dpm_disable(struct radeon_device *rdev) 2000 { 2001 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2002 2003 if (!rv770_dpm_enabled(rdev)) 2004 return; 2005 2006 rv770_clear_vc(rdev); 2007 2008 if (pi->thermal_protection) 2009 rv770_enable_thermal_protection(rdev, false); 2010 2011 rv770_enable_spread_spectrum(rdev, false); 2012 2013 if (pi->dynamic_pcie_gen2) 2014 rv770_enable_dynamic_pcie_gen2(rdev, false); 2015 2016 if (rdev->irq.installed && 2017 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 2018 rdev->irq.dpm_thermal = false; 2019 radeon_irq_set(rdev); 2020 } 2021 2022 if (pi->gfx_clock_gating) 2023 rv770_gfx_clock_gating_enable(rdev, false); 2024 2025 if (pi->mg_clock_gating) 2026 rv770_mg_clock_gating_enable(rdev, false); 2027 2028 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) 2029 rv730_stop_dpm(rdev); 2030 else 2031 rv770_stop_dpm(rdev); 2032 2033 r7xx_stop_smc(rdev); 2034 rv770_reset_smio_status(rdev); 2035 } 2036 2037 int rv770_dpm_set_power_state(struct radeon_device *rdev) 2038 { 2039 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2040 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps; 2041 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; 2042 int ret; 2043 2044 ret = rv770_restrict_performance_levels_before_switch(rdev); 2045 if (ret) { 2046 DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n"); 2047 return ret; 2048 } 2049 rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 2050 ret = rv770_halt_smc(rdev); 2051 if (ret) { 2052 DRM_ERROR("rv770_halt_smc failed\n"); 2053 return ret; 2054 } 2055 ret = rv770_upload_sw_state(rdev, new_ps); 2056 if (ret) { 2057 DRM_ERROR("rv770_upload_sw_state failed\n"); 2058 return ret; 2059 } 2060 r7xx_program_memory_timing_parameters(rdev, new_ps); 2061 if (pi->dcodt) 2062 rv770_program_dcodt_before_state_switch(rdev, new_ps, old_ps); 2063 ret = rv770_resume_smc(rdev); 2064 if (ret) { 2065 DRM_ERROR("rv770_resume_smc failed\n"); 2066 return ret; 2067 } 2068 ret = rv770_set_sw_state(rdev); 2069 if (ret) { 2070 DRM_ERROR("rv770_set_sw_state failed\n"); 2071 return ret; 2072 } 2073 if (pi->dcodt) 2074 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); 2075 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 2076 2077 return 0; 2078 } 2079 2080 #if 0 2081 void rv770_dpm_reset_asic(struct radeon_device *rdev) 2082 { 2083 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2084 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 2085 2086 rv770_restrict_performance_levels_before_switch(rdev); 2087 if (pi->dcodt) 2088 rv770_program_dcodt_before_state_switch(rdev, boot_ps, boot_ps); 2089 rv770_set_boot_state(rdev); 2090 if (pi->dcodt) 2091 rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps); 2092 } 2093 #endif 2094 2095 void rv770_dpm_setup_asic(struct radeon_device *rdev) 2096 { 2097 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2098 2099 r7xx_read_clock_registers(rdev); 2100 rv770_read_voltage_smio_registers(rdev); 2101 rv770_get_memory_type(rdev); 2102 if (pi->dcodt) 2103 rv770_get_mclk_odt_threshold(rdev); 2104 rv770_get_pcie_gen2_status(rdev); 2105 2106 rv770_enable_acpi_pm(rdev); 2107 2108 if (radeon_aspm != 0) { 2109 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) 2110 rv770_enable_l0s(rdev); 2111 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) 2112 rv770_enable_l1(rdev); 2113 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) 2114 rv770_enable_pll_sleep_in_l1(rdev); 2115 } 2116 } 2117 2118 void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) 2119 { 2120 rv770_program_display_gap(rdev); 2121 } 2122 2123 union power_info { 2124 struct _ATOM_POWERPLAY_INFO info; 2125 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2126 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2127 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2128 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2129 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2130 }; 2131 2132 union pplib_clock_info { 2133 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2134 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2135 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2136 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2137 }; 2138 2139 union pplib_power_state { 2140 struct _ATOM_PPLIB_STATE v1; 2141 struct _ATOM_PPLIB_STATE_V2 v2; 2142 }; 2143 2144 static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev, 2145 struct radeon_ps *rps, 2146 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2147 u8 table_rev) 2148 { 2149 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2150 rps->class = le16_to_cpu(non_clock_info->usClassification); 2151 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2152 2153 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2154 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2155 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2156 } else { 2157 rps->vclk = 0; 2158 rps->dclk = 0; 2159 } 2160 2161 if (r600_is_uvd_state(rps->class, rps->class2)) { 2162 if ((rps->vclk == 0) || (rps->dclk == 0)) { 2163 rps->vclk = RV770_DEFAULT_VCLK_FREQ; 2164 rps->dclk = RV770_DEFAULT_DCLK_FREQ; 2165 } 2166 } 2167 2168 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 2169 rdev->pm.dpm.boot_ps = rps; 2170 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2171 rdev->pm.dpm.uvd_ps = rps; 2172 } 2173 2174 static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, 2175 struct radeon_ps *rps, int index, 2176 union pplib_clock_info *clock_info) 2177 { 2178 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2179 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 2180 struct rv7xx_ps *ps = rv770_get_ps(rps); 2181 u32 sclk, mclk; 2182 struct rv7xx_pl *pl; 2183 2184 switch (index) { 2185 case 0: 2186 pl = &ps->low; 2187 break; 2188 case 1: 2189 pl = &ps->medium; 2190 break; 2191 case 2: 2192 default: 2193 pl = &ps->high; 2194 break; 2195 } 2196 2197 if (rdev->family >= CHIP_CEDAR) { 2198 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); 2199 sclk |= clock_info->evergreen.ucEngineClockHigh << 16; 2200 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); 2201 mclk |= clock_info->evergreen.ucMemoryClockHigh << 16; 2202 2203 pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC); 2204 pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI); 2205 pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags); 2206 } else { 2207 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); 2208 sclk |= clock_info->r600.ucEngineClockHigh << 16; 2209 mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow); 2210 mclk |= clock_info->r600.ucMemoryClockHigh << 16; 2211 2212 pl->vddc = le16_to_cpu(clock_info->r600.usVDDC); 2213 pl->flags = le32_to_cpu(clock_info->r600.ulFlags); 2214 } 2215 2216 pl->mclk = mclk; 2217 pl->sclk = sclk; 2218 2219 /* patch up vddc if necessary */ 2220 if (pl->vddc == 0xff01) { 2221 if (pi->max_vddc) 2222 pl->vddc = pi->max_vddc; 2223 } 2224 2225 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 2226 pi->acpi_vddc = pl->vddc; 2227 if (rdev->family >= CHIP_CEDAR) 2228 eg_pi->acpi_vddci = pl->vddci; 2229 if (ps->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) 2230 pi->acpi_pcie_gen2 = true; 2231 else 2232 pi->acpi_pcie_gen2 = false; 2233 } 2234 2235 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { 2236 if (rdev->family >= CHIP_BARTS) { 2237 eg_pi->ulv.supported = true; 2238 eg_pi->ulv.pl = pl; 2239 } 2240 } 2241 2242 if (pi->min_vddc_in_table > pl->vddc) 2243 pi->min_vddc_in_table = pl->vddc; 2244 2245 if (pi->max_vddc_in_table < pl->vddc) 2246 pi->max_vddc_in_table = pl->vddc; 2247 2248 /* patch up boot state */ 2249 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2250 u16 vddc, vddci, mvdd; 2251 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd); 2252 pl->mclk = rdev->clock.default_mclk; 2253 pl->sclk = rdev->clock.default_sclk; 2254 pl->vddc = vddc; 2255 pl->vddci = vddci; 2256 } 2257 2258 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2259 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 2260 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; 2261 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; 2262 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; 2263 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; 2264 } 2265 } 2266 2267 int rv7xx_parse_power_table(struct radeon_device *rdev) 2268 { 2269 struct radeon_mode_info *mode_info = &rdev->mode_info; 2270 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2271 union pplib_power_state *power_state; 2272 int i, j; 2273 union pplib_clock_info *clock_info; 2274 union power_info *power_info; 2275 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2276 u16 data_offset; 2277 u8 frev, crev; 2278 struct rv7xx_ps *ps; 2279 2280 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 2281 &frev, &crev, &data_offset)) 2282 return -EINVAL; 2283 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2284 2285 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * 2286 power_info->pplib.ucNumStates, GFP_KERNEL); 2287 if (!rdev->pm.dpm.ps) 2288 return -ENOMEM; 2289 2290 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 2291 power_state = (union pplib_power_state *) 2292 (mode_info->atom_context->bios + data_offset + 2293 le16_to_cpu(power_info->pplib.usStateArrayOffset) + 2294 i * power_info->pplib.ucStateEntrySize); 2295 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2296 (mode_info->atom_context->bios + data_offset + 2297 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 2298 (power_state->v1.ucNonClockStateIndex * 2299 power_info->pplib.ucNonClockSize)); 2300 if (power_info->pplib.ucStateEntrySize - 1) { 2301 u8 *idx; 2302 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL); 2303 if (ps == NULL) { 2304 kfree(rdev->pm.dpm.ps); 2305 return -ENOMEM; 2306 } 2307 rdev->pm.dpm.ps[i].ps_priv = ps; 2308 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 2309 non_clock_info, 2310 power_info->pplib.ucNonClockSize); 2311 idx = (u8 *)&power_state->v1.ucClockStateIndices[0]; 2312 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2313 clock_info = (union pplib_clock_info *) 2314 (mode_info->atom_context->bios + data_offset + 2315 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2316 (idx[j] * power_info->pplib.ucClockInfoSize)); 2317 rv7xx_parse_pplib_clock_info(rdev, 2318 &rdev->pm.dpm.ps[i], j, 2319 clock_info); 2320 } 2321 } 2322 } 2323 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates; 2324 return 0; 2325 } 2326 2327 void rv770_get_engine_memory_ss(struct radeon_device *rdev) 2328 { 2329 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 2330 struct radeon_atom_ss ss; 2331 2332 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2333 ASIC_INTERNAL_ENGINE_SS, 0); 2334 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2335 ASIC_INTERNAL_MEMORY_SS, 0); 2336 2337 if (pi->sclk_ss || pi->mclk_ss) 2338 pi->dynamic_ss = true; 2339 else 2340 pi->dynamic_ss = false; 2341 } 2342 2343 int rv770_dpm_init(struct radeon_device *rdev) 2344 { 2345 struct rv7xx_power_info *pi; 2346 struct atom_clock_dividers dividers; 2347 int ret; 2348 2349 pi = kzalloc(sizeof(struct rv7xx_power_info), GFP_KERNEL); 2350 if (pi == NULL) 2351 return -ENOMEM; 2352 rdev->pm.dpm.priv = pi; 2353 2354 rv770_get_max_vddc(rdev); 2355 2356 pi->acpi_vddc = 0; 2357 pi->min_vddc_in_table = 0; 2358 pi->max_vddc_in_table = 0; 2359 2360 ret = r600_get_platform_caps(rdev); 2361 if (ret) 2362 return ret; 2363 2364 ret = rv7xx_parse_power_table(rdev); 2365 if (ret) 2366 return ret; 2367 2368 if (rdev->pm.dpm.voltage_response_time == 0) 2369 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; 2370 if (rdev->pm.dpm.backbias_response_time == 0) 2371 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; 2372 2373 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 2374 0, false, ÷rs); 2375 if (ret) 2376 pi->ref_div = dividers.ref_div + 1; 2377 else 2378 pi->ref_div = R600_REFERENCEDIVIDER_DFLT; 2379 2380 pi->mclk_strobe_mode_threshold = 30000; 2381 pi->mclk_edc_enable_threshold = 30000; 2382 2383 pi->rlp = RV770_RLP_DFLT; 2384 pi->rmp = RV770_RMP_DFLT; 2385 pi->lhp = RV770_LHP_DFLT; 2386 pi->lmp = RV770_LMP_DFLT; 2387 2388 pi->voltage_control = 2389 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0); 2390 2391 pi->mvdd_control = 2392 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); 2393 2394 rv770_get_engine_memory_ss(rdev); 2395 2396 pi->asi = RV770_ASI_DFLT; 2397 pi->pasi = RV770_HASI_DFLT; 2398 pi->vrc = RV770_VRC_DFLT; 2399 2400 pi->power_gating = false; 2401 2402 pi->gfx_clock_gating = true; 2403 2404 pi->mg_clock_gating = true; 2405 pi->mgcgtssm = true; 2406 2407 pi->dynamic_pcie_gen2 = true; 2408 2409 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) 2410 pi->thermal_protection = true; 2411 else 2412 pi->thermal_protection = false; 2413 2414 pi->display_gap = true; 2415 2416 if (rdev->flags & RADEON_IS_MOBILITY) 2417 pi->dcodt = true; 2418 else 2419 pi->dcodt = false; 2420 2421 pi->ulps = true; 2422 2423 pi->mclk_stutter_mode_threshold = 0; 2424 2425 pi->sram_end = SMC_RAM_END; 2426 pi->state_table_start = RV770_SMC_TABLE_ADDRESS; 2427 pi->soft_regs_start = RV770_SMC_SOFT_REGISTERS_START; 2428 2429 return 0; 2430 } 2431 2432 void rv770_dpm_print_power_state(struct radeon_device *rdev, 2433 struct radeon_ps *rps) 2434 { 2435 struct rv7xx_ps *ps = rv770_get_ps(rps); 2436 struct rv7xx_pl *pl; 2437 2438 r600_dpm_print_class_info(rps->class, rps->class2); 2439 r600_dpm_print_cap_info(rps->caps); 2440 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2441 if (rdev->family >= CHIP_CEDAR) { 2442 pl = &ps->low; 2443 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u vddci: %u\n", 2444 pl->sclk, pl->mclk, pl->vddc, pl->vddci); 2445 pl = &ps->medium; 2446 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u vddci: %u\n", 2447 pl->sclk, pl->mclk, pl->vddc, pl->vddci); 2448 pl = &ps->high; 2449 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u vddci: %u\n", 2450 pl->sclk, pl->mclk, pl->vddc, pl->vddci); 2451 } else { 2452 pl = &ps->low; 2453 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n", 2454 pl->sclk, pl->mclk, pl->vddc); 2455 pl = &ps->medium; 2456 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n", 2457 pl->sclk, pl->mclk, pl->vddc); 2458 pl = &ps->high; 2459 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n", 2460 pl->sclk, pl->mclk, pl->vddc); 2461 } 2462 r600_dpm_print_ps_status(rdev, rps); 2463 } 2464 2465 void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 2466 struct seq_file *m) 2467 { 2468 struct radeon_ps *rps = rdev->pm.dpm.current_ps; 2469 struct rv7xx_ps *ps = rv770_get_ps(rps); 2470 struct rv7xx_pl *pl; 2471 u32 current_index = 2472 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> 2473 CURRENT_PROFILE_INDEX_SHIFT; 2474 2475 if (current_index > 2) { 2476 seq_printf(m, "invalid dpm profile %d\n", current_index); 2477 } else { 2478 if (current_index == 0) 2479 pl = &ps->low; 2480 else if (current_index == 1) 2481 pl = &ps->medium; 2482 else /* current_index == 2 */ 2483 pl = &ps->high; 2484 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2485 if (rdev->family >= CHIP_CEDAR) { 2486 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", 2487 current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); 2488 } else { 2489 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", 2490 current_index, pl->sclk, pl->mclk, pl->vddc); 2491 } 2492 } 2493 } 2494 2495 void rv770_dpm_fini(struct radeon_device *rdev) 2496 { 2497 int i; 2498 2499 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 2500 kfree(rdev->pm.dpm.ps[i].ps_priv); 2501 } 2502 kfree(rdev->pm.dpm.ps); 2503 kfree(rdev->pm.dpm.priv); 2504 } 2505 2506 u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low) 2507 { 2508 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps); 2509 2510 if (low) 2511 return requested_state->low.sclk; 2512 else 2513 return requested_state->high.sclk; 2514 } 2515 2516 u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) 2517 { 2518 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps); 2519 2520 if (low) 2521 return requested_state->low.mclk; 2522 else 2523 return requested_state->high.mclk; 2524 } 2525 2526 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) 2527 { 2528 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2529 u32 switch_limit = 200; /* 300 */ 2530 2531 /* RV770 */ 2532 /* mclk switching doesn't seem to work reliably on desktop RV770s */ 2533 if ((rdev->family == CHIP_RV770) && 2534 !(rdev->flags & RADEON_IS_MOBILITY)) 2535 switch_limit = 0xffffffff; /* disable mclk switching */ 2536 2537 if (vblank_time < switch_limit) 2538 return true; 2539 else 2540 return false; 2541 2542 } 2543