/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}
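
/*
 * Report the vblank interval, in microseconds, of the first enabled CRTC.
 * line_time_us is crtc_htotal (pixels) * 1000 / clock (kHz), i.e. the
 * duration of one scanline in us; multiplying by the number of blanking
 * lines (vblank_end - vdisplay, plus borders) gives the window.  With
 * illustrative numbers (not from the source): htotal = 1100 and a
 * 74250 kHz pixel clock give 14 us per line; 45 blanking lines then
 * yield roughly 630 us.  If no CRTC is active, the initial 0xffffffff
 * is returned, which callers appear to treat as "unbounded".
 */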
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
					radeon_crtc->hw_mode.clock;
				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
					radeon_crtc->hw_mode.crtc_vdisplay +
					(radeon_crtc->v_border * 2);
				vblank_time_us = vblank_lines * line_time_us;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
				break;
			}
		}
	}
	return vrefresh;
}
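
/*
 * Split an interval into the (p, u) pair that the power management FIR
 * registers expect.  As implemented: i_c = i * r_c / 100, b_c counts the
 * bits of i_c above bit position p_b, u = (b_c + 1) / 2, and p = i_c / 4^u
 * scales the interval back down by the chosen unit.  Worked example with
 * illustrative values: i = 30, r_c = 10000, p_b = 8 gives i_c = 3000,
 * tmp = 3000 >> 8 = 11 (four bits), so b_c = 4, u = 2, p = 3000 / 16 = 187.
 */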
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
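
/*
 * Derive the threshold pair (tl, th) around the target t from the
 * hysteresis h, scaled by the high/low frequency ratio fh/fl (k, in
 * percent).  The scaled hysteresis a is split asymmetrically into ah and
 * al and applied as *th = t - ah, *tl = t + al.  Returns -EINVAL when
 * either frequency is zero or fl > fh, which would make the ratio
 * meaningless.
 */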
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
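
/*
 * Gate or ungate the dynamic GFX engine clock.  The disable path goes
 * through what looks like an RLC handshake: write request type 0x2 to
 * CG_RLC_REQ_AND_RSP, poll (bounded by usec_timeout) until the response
 * type field reads 1, clear the request, then force GRBM power up.  The
 * trailing RREG32(GRBM_PWR_CNTL) is presumably a posting read to flush
 * the write.
 */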
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}
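
/*
 * Each SCLK frequency-setting step is a pair of consecutive dwords
 * (SCLK_FREQ_SETTING_STEP_0_PART1/PART2), hence the "index * 4 * 2" byte
 * stride in the accessors below: PART1 carries the SPLL divider and step
 * time fields, PART2 the valid/step/post-divider enable bits.
 */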
void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
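
/*
 * The per-level CTXSW registers appear to be laid out in reverse order of
 * the r600_power_level enum, hence ix = 3 - (3 & index) in the helpers
 * below (level 0 maps to slot 3 and vice versa).  The low 32 voltage GPIO
 * pins get a full CTXSW_VID_LOWER_GPIO_CNTL dword per level, while the
 * upper pins are packed three bits per level into VID_UPPER_GPIO_CNTL.
 */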
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}
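
/*
 * TARGET_AND_CURRENT_PROFILE_INDEX reports both the power level the
 * hardware is transitioning toward and the one currently in effect; the
 * two accessors below extract those fields and back the bounded-poll
 * wait helpers that follow.
 */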
enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}
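
/*
 * Bring up dynamic power management: engine and memory clock control are
 * parked first, global power management is switched on, and the SPLL is
 * bounced through bypass twice (waiting for SPLL_CHG_STATUS after every
 * toggle) before clock control is handed back.  The vblank waits
 * presumably keep the PLL transitions off-screen; the source does not
 * say why the bypass sequence is repeated.
 */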
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
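
/*
 * Clamp the requested thermal interrupt range to the 0..255 degree span
 * the DIG_THERM fields can encode.  Note the unit split: the registers
 * are programmed in whole degrees C (temp / 1000) while the dpm.thermal
 * min/max fields keep millidegrees, matching the rest of the driver.
 */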
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
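
/*
 * ATOM stores 24-bit clocks split across a little-endian 16-bit low word
 * and an 8-bit high byte, reassembled here as le16_to_cpu(usClockLow) |
 * (ucClockHigh << 16).  Records are advanced by sizeof(record) byte
 * arithmetic rather than array indexing, a pattern the PPLIB parsers use
 * because record sizes can differ between table revisions.
 */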
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
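
/*
 * Pull the optional PPLIB tables (fan, clock/voltage dependency, phase
 * shedding, CAC, and the extended-header tables) out of the PowerPlay
 * blob.  Presence is gated twice: first on usTableSize being large
 * enough for the table-3/4/5 layout in question, then on each per-table
 * offset being non-zero.  Any mid-parse allocation failure unwinds
 * through r600_free_extended_power_table() or explicit kfree()s.
 */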
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				rdev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				rdev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}
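
	/*
	 * The extended header chains further tables (VCE, UVD, SAMU, PPM,
	 * ACP, PowerTune) gated by its usSize.  The "+ 1" terms in the
	 * offsets below appear to skip one-byte revision/count prefixes in
	 * front of the VCE/UVD arrays and limit tables; shifting these
	 * offsets would misalign every field that follows, so they are
	 * deliberate.
	 */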
	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
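
/*
 * Encode a PCIe lane count into the 3-bit register field by lookup:
 * 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 12 -> 5, 16 -> 6; unsupported widths
 * map to 0, and out-of-range inputs (> 16) are rejected before indexing
 * the table.
 */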
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}