/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}
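
/*
 * Vblank time below is derived from the active mode: with crtc_htotal in
 * pixels and hw_mode.clock in kHz, (htotal * 1000) / clock yields the line
 * time in microseconds, and multiplying by the number of vblank lines gives
 * the vblank interval. Illustrative numbers (not from this driver): a
 * 1920x1080@60 mode with htotal 2200 and a 148500 kHz pixel clock gives
 * 14 us per line after integer division, so 45 vblank lines come to
 * roughly 630 us.
 */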
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
					radeon_crtc->hw_mode.clock;
				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
					radeon_crtc->hw_mode.crtc_vdisplay +
					(radeon_crtc->v_border * 2);
				vblank_time_us = vblank_lines * line_time_us;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
				vrefresh = radeon_crtc->hw_mode.vrefresh;
				break;
			}
		}
	}
	return vrefresh;
}
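
/*
 * A sketch of the math in r600_calculate_u_and_p(), as read from the code:
 * i_c = (i * r_c) / 100 scales the interval by the clock ratio, b_c counts
 * the bits of i_c above the p_b boundary (the position of the highest set
 * bit of i_c >> p_b), u = (b_c + 1) / 2 picks a shift in units of two bits,
 * and p = i_c >> (2 * u) is the remaining mantissa. In effect the (p, u)
 * pair encodes i_c roughly as p * 4^u for the hardware register fields.
 */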
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
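
/*
 * r600_calculate_at() splits a target period t into high/low thresholds
 * around t using the fh/fl frequency ratio. Reading the arithmetic:
 * k = (100 * fh) / fl is the ratio in percent, a approximates an activity
 * fraction derived from the hysteresis h and t * (k - 100), and ah/al
 * split it so that *th = t - ah and *tl = t + al. The scaling constants
 * (1000, 10000, 5000) appear to implement fixed-point rounding.
 */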
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
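
/*
 * The helpers below index one of four hardware profile slots. Note the
 * recurring ix = 3 - (3 & index) conversion: the r600_power_level enum
 * apparently runs opposite to the register ordering, so level 0 maps to
 * slot 3, level 1 to slot 2, and so on, before being multiplied by the
 * 4-byte register stride.
 */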
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}
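
/*
 * r600_start_dpm() below disables sclk/mclk scaling while dynamic power
 * management is switched on, then toggles the SPLL in and out of bypass
 * twice, waiting for SPLL_CHG_STATUS each time, seemingly to let the PLL
 * settle before clock control is re-enabled. The two vblank waits keep
 * the transition off-screen on the first two CRTCs.
 */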
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
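
/*
 * Thermal limits are handled in millidegrees Celsius here: the requested
 * range is clamped to [0, 255] degrees C (times 1000), divided back down
 * to whole degrees for the DIG_THERM_INTH/INTL/DPM register fields, and
 * the millidegree values are cached in rdev->pm.dpm.thermal for the rest
 * of the driver.
 */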
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};
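
/*
 * The ATOM powerplay tables store 24-bit clocks split across a
 * little-endian u16 low word and a u8 high byte, so the parsers below
 * reassemble them as le16_to_cpu(usClockLow) | (ucClockHigh << 16).
 * Records are walked by raw byte offset rather than array indexing,
 * presumably because the table layout in the VBIOS is packed.
 */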
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
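
/*
 * r600_parse_extended_power_table() walks the optional sections of the
 * powerplay table in order: fan table, clock/voltage dependency tables,
 * phase shedding limits, CAC leakage data, and then the extended header
 * blocks (VCE, UVD, SAMU, PPM, ACP, PowerTune), gated by the
 * SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V* revision checks above. On any
 * allocation failure it unwinds everything already parsed via
 * r600_free_extended_power_table().
 */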
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
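
/*
 * The encoding below maps a lane count to the 3-bit field the hardware
 * expects: 1->1, 2->2, 4->3, 8->4, 12->5, 16->6, with unsupported widths
 * falling back to 0. The lookup table is indexed directly by lane count,
 * which is why the intermediate entries are zero.
 */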
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}