/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/pci.h>
#include <linux/power_supply.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_vblank.h>

#include "atom.h"
#include "avivod.h"
#include "r600_dpm.h"
#include "radeon.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		mutex_lock(&rdev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			rdev->pm.dpm.ac_power = true;
		else
			rdev->pm.dpm.ac_power = false;
		if (rdev->family == CHIP_ARUBA) {
			if (rdev->asic->dpm.enable_bapm)
				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
		}
		mutex_unlock(&rdev->pm.mutex);
	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}

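/* Map the profile selection (and the AC/DC state for the "auto"
 * profile) to a profile index, then pick the requested power state and
 * clock mode for that profile based on whether any CRTCs are active.
 */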
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}

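/* Program the requested power state: clamp sclk/mclk to the defaults,
 * reprogram voltage and pcie lanes before raising clocks (or after
 * lowering them), and update the current-state bookkeeping.
 */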
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH. Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	struct drm_crtc *crtc;
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* needs a GPU reset; don't reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		i = 0;
		drm_for_each_crtc(crtc, rdev->ddev) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				/* This can fail if a modeset is in progress */
				if (drm_crtc_vblank_get(crtc) == 0)
					rdev->pm.req_vblank |= (1 << i);
				else
					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
							 i);
			}
			i++;
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		i = 0;
		drm_for_each_crtc(crtc, rdev->ddev) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_crtc_vblank_put(crtc);
			}
			i++;
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}

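/* Dump every power state and its clock modes to the log at debug
 * level; clock values are stored internally in 10 kHz units.
 */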
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;

	/* Can't set profile when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			count = -EINVAL;
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else
		count = -EINVAL;

fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

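/* sysfs show/store callbacks for "power_method"; userspace can switch
 * between "dynpm" and "profile" at runtime, while "dpm" is selected
 * only at init time and cannot be entered or left here.
 */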
"profile" : "dpm"); 421 } 422 423 static ssize_t radeon_set_pm_method(struct device *dev, 424 struct device_attribute *attr, 425 const char *buf, 426 size_t count) 427 { 428 struct drm_device *ddev = dev_get_drvdata(dev); 429 struct radeon_device *rdev = ddev->dev_private; 430 431 /* Can't set method when the card is off */ 432 if ((rdev->flags & RADEON_IS_PX) && 433 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 434 count = -EINVAL; 435 goto fail; 436 } 437 438 /* we don't support the legacy modes with dpm */ 439 if (rdev->pm.pm_method == PM_METHOD_DPM) { 440 count = -EINVAL; 441 goto fail; 442 } 443 444 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 445 mutex_lock(&rdev->pm.mutex); 446 rdev->pm.pm_method = PM_METHOD_DYNPM; 447 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 448 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 449 mutex_unlock(&rdev->pm.mutex); 450 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 451 mutex_lock(&rdev->pm.mutex); 452 /* disable dynpm */ 453 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 454 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 455 rdev->pm.pm_method = PM_METHOD_PROFILE; 456 mutex_unlock(&rdev->pm.mutex); 457 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 458 } else { 459 count = -EINVAL; 460 goto fail; 461 } 462 radeon_pm_compute_clocks(rdev); 463 fail: 464 return count; 465 } 466 467 static ssize_t radeon_get_dpm_state(struct device *dev, 468 struct device_attribute *attr, 469 char *buf) 470 { 471 struct drm_device *ddev = dev_get_drvdata(dev); 472 struct radeon_device *rdev = ddev->dev_private; 473 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 474 475 return snprintf(buf, PAGE_SIZE, "%s\n", 476 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 477 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 478 } 479 480 static ssize_t radeon_set_dpm_state(struct device *dev, 481 struct device_attribute *attr, 482 const char *buf, 483 size_t count) 484 { 485 struct drm_device *ddev = dev_get_drvdata(dev); 486 struct radeon_device *rdev = ddev->dev_private; 487 488 mutex_lock(&rdev->pm.mutex); 489 if (strncmp("battery", buf, strlen("battery")) == 0) 490 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 491 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 492 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 493 else if (strncmp("performance", buf, strlen("performance")) == 0) 494 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 495 else { 496 mutex_unlock(&rdev->pm.mutex); 497 count = -EINVAL; 498 goto fail; 499 } 500 mutex_unlock(&rdev->pm.mutex); 501 502 /* Can't set dpm state when the card is off */ 503 if (!(rdev->flags & RADEON_IS_PX) || 504 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 505 radeon_pm_compute_clocks(rdev); 506 507 fail: 508 return count; 509 } 510 511 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, 512 struct device_attribute *attr, 513 char *buf) 514 { 515 struct drm_device *ddev = dev_get_drvdata(dev); 516 struct radeon_device *rdev = ddev->dev_private; 517 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 518 519 if ((rdev->flags & RADEON_IS_PX) && 520 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 521 return snprintf(buf, PAGE_SIZE, "off\n"); 522 523 return snprintf(buf, PAGE_SIZE, "%s\n", 524 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 525 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? 
"low" : "high"); 526 } 527 528 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, 529 struct device_attribute *attr, 530 const char *buf, 531 size_t count) 532 { 533 struct drm_device *ddev = dev_get_drvdata(dev); 534 struct radeon_device *rdev = ddev->dev_private; 535 enum radeon_dpm_forced_level level; 536 int ret = 0; 537 538 /* Can't force performance level when the card is off */ 539 if ((rdev->flags & RADEON_IS_PX) && 540 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 541 return -EINVAL; 542 543 mutex_lock(&rdev->pm.mutex); 544 if (strncmp("low", buf, strlen("low")) == 0) { 545 level = RADEON_DPM_FORCED_LEVEL_LOW; 546 } else if (strncmp("high", buf, strlen("high")) == 0) { 547 level = RADEON_DPM_FORCED_LEVEL_HIGH; 548 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 549 level = RADEON_DPM_FORCED_LEVEL_AUTO; 550 } else { 551 count = -EINVAL; 552 goto fail; 553 } 554 if (rdev->asic->dpm.force_performance_level) { 555 if (rdev->pm.dpm.thermal_active) { 556 count = -EINVAL; 557 goto fail; 558 } 559 ret = radeon_dpm_force_performance_level(rdev, level); 560 if (ret) 561 count = -EINVAL; 562 } 563 fail: 564 mutex_unlock(&rdev->pm.mutex); 565 566 return count; 567 } 568 569 static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev, 570 struct device_attribute *attr, 571 char *buf) 572 { 573 struct radeon_device *rdev = dev_get_drvdata(dev); 574 u32 pwm_mode = 0; 575 576 if (rdev->asic->dpm.fan_ctrl_get_mode) 577 pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev); 578 579 /* never 0 (full-speed), fuse or smc-controlled always */ 580 return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2); 581 } 582 583 static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev, 584 struct device_attribute *attr, 585 const char *buf, 586 size_t count) 587 { 588 struct radeon_device *rdev = dev_get_drvdata(dev); 589 int err; 590 int value; 591 592 if(!rdev->asic->dpm.fan_ctrl_set_mode) 593 return -EINVAL; 594 595 err = kstrtoint(buf, 10, &value); 596 if (err) 597 return err; 598 599 switch (value) { 600 case 1: /* manual, percent-based */ 601 rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC); 602 break; 603 default: /* disable */ 604 rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0); 605 break; 606 } 607 608 return count; 609 } 610 611 static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev, 612 struct device_attribute *attr, 613 char *buf) 614 { 615 return sprintf(buf, "%i\n", 0); 616 } 617 618 static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev, 619 struct device_attribute *attr, 620 char *buf) 621 { 622 return sprintf(buf, "%i\n", 255); 623 } 624 625 static ssize_t radeon_hwmon_set_pwm1(struct device *dev, 626 struct device_attribute *attr, 627 const char *buf, size_t count) 628 { 629 struct radeon_device *rdev = dev_get_drvdata(dev); 630 int err; 631 u32 value; 632 633 err = kstrtou32(buf, 10, &value); 634 if (err) 635 return err; 636 637 value = (value * 100) / 255; 638 639 err = rdev->asic->dpm.set_fan_speed_percent(rdev, value); 640 if (err) 641 return err; 642 643 return count; 644 } 645 646 static ssize_t radeon_hwmon_get_pwm1(struct device *dev, 647 struct device_attribute *attr, 648 char *buf) 649 { 650 struct radeon_device *rdev = dev_get_drvdata(dev); 651 int err; 652 u32 speed; 653 654 err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed); 655 if (err) 656 return err; 657 658 speed = (speed * 255) / 100; 659 660 return sprintf(buf, "%i\n", speed); 661 } 662 663 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, 
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR,
		   radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   radeon_get_dpm_forced_performance_level,
		   radeon_set_dpm_forced_performance_level);

static ssize_t radeon_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	struct drm_device *ddev = rdev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((rdev->flags & RADEON_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (rdev->asic->pm.get_temperature)
		temp = radeon_get_temperature(rdev);
	else
		temp = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct radeon_device *rdev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = rdev->pm.dpm.thermal.min_temp;
	else
		temp = rdev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

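/* Hide or restrict hwmon attributes depending on the pm method, the
 * presence of a fan, and which fan-control callbacks the asic provides.
 */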
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct radeon_device *rdev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip attributes if DPM is not enabled */
	if (rdev->pm.pm_method != PM_METHOD_DPM &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (rdev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!rdev->asic->dpm.get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!rdev->asic->dpm.set_fan_speed_percent &&
	     !rdev->asic->dpm.get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		if (rdev->asic->pm.get_temperature == NULL)
			return err;
		rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
									   "radeon", rdev,
									   hwmon_groups);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
		}
		break;
	default:
		break;
	}

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	if (rdev->pm.int_hwmon_dev)
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}

static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	mutex_lock(&rdev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		rdev->pm.dpm.thermal_active = true;
	else
		rdev->pm.dpm.thermal_active = false;
	rdev->pm.dpm.state = dpm_state;
	mutex_unlock(&rdev->pm.mutex);

	radeon_pm_compute_clocks(rdev);
}

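/* Decide whether the current configuration counts as "single display"
 * for power-state selection; short vblank periods and refresh rates of
 * 120 Hz or more force the multi-display path so mclk switching stays
 * safe.
 */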
static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2);

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* 120 Hz displays tend to be problematic even if they are under
	 * the vblank limit.
	 */
	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
		single_display = false;

	return single_display;
}

static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = radeon_dpm_single_display(rdev);

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match; each fallback remaps the
	 * request to a more generic state and retries, so the search always
	 * terminates (with NULL if nothing matches at all).
	 */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

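/* Pick and program the next power state; the caller must hold
 * rdev->pm.mutex. The hardware is left untouched when nothing relevant
 * changed, and any forced performance level is re-applied afterwards.
 */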
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;
	bool single_display = radeon_dpm_single_display(rdev);

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		/* user has made a display change (such as timing) */
		if (rdev->pm.dpm.single_display != single_display)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs, if the number of crtcs changed
			 * but the state is the same, all we need to do is
			 * update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+, if the number of crtcs hasn't changed and
			 * the state is the same, there is nothing to do; if the
			 * number of crtcs is > 1 and the state is the same,
			 * update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
	rdev->pm.dpm.single_display = single_display;

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
}

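/* Request (or drop) a UVD-friendly power state; on asics with UVD
 * powergating this toggles the gate directly instead.
 */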
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but paused streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}

void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
	if (enable) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = true;
		/* XXX select vce level based on ring/task */
		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
		mutex_unlock(&rdev->pm.mutex);
	} else {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.dpm.vce_active = false;
		mutex_unlock(&rdev->pm.mutex);
	}

	radeon_pm_compute_clocks(rdev);
}

static void radeon_pm_suspend_old(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}

static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	/* disable dpm */
	radeon_dpm_disable(rdev);
	/* reset the power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	rdev->pm.dpm_enabled = false;
	mutex_unlock(&rdev->pm.mutex);
}

void radeon_pm_suspend(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_suspend_dpm(rdev);
	else
		radeon_pm_suspend_old(rdev);
}

static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}

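/* Re-enable dpm after resume; on failure fall back to the default
 * voltages and clocks so the card is at least in a usable state.
 */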
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume_dpm(rdev);
	else
		radeon_pm_resume_old(rdev);
}

static int radeon_pm_init_old(struct radeon_device *rdev)
{
	int ret;

	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

static void radeon_dpm_print_power_states(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		printk("== power state %d ==\n", i);
		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
	}
}

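/* Initialize dpm: parse the power states from the vbios, register the
 * internal thermal sensor, enable dpm, and fall back to the default
 * clocks if enabling fails.
 */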
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}

struct radeon_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
};

/* cards with dpm stability problems */
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },
};

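/* Top-level pm init: apply the dpm quirk list, pick dpm or the legacy
 * profile method based on asic family, available firmware and the
 * radeon_dpm module parameter, then hand off to the matching init path.
 */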
int radeon_pm_init(struct radeon_device *rdev)
{
	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
	bool disable_dpm = false;

	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			disable_dpm = true;
			break;
		}
		++p;
	}

	/* enable dpm on rv6xx+ */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV770:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 1)
			rdev->pm.pm_method = PM_METHOD_DPM;
		else
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* DPM requires the RLC, RV770+ dGPU requires SMC */
		if (!rdev->rlc_fw)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if ((rdev->family >= CHIP_RV770) &&
			 (!(rdev->flags & RADEON_IS_IGP)) &&
			 (!rdev->smc_fw))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (disable_dpm && (radeon_dpm == -1))
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else if (radeon_dpm == 0)
			rdev->pm.pm_method = PM_METHOD_PROFILE;
		else
			rdev->pm.pm_method = PM_METHOD_DPM;
		break;
	default:
		/* default to profile method */
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		break;
	}

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		return radeon_pm_init_dpm(rdev);
	else
		return radeon_pm_init_old(rdev);
}

int radeon_pm_late_init(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->pm.pm_method == PM_METHOD_DPM) {
		if (rdev->pm.dpm_enabled) {
			if (!rdev->pm.sysfs_initialized) {
				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
				if (ret)
					DRM_ERROR("failed to create device file for dpm state\n");
				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
				if (ret)
					DRM_ERROR("failed to create device file for dpm state\n");
				/* XXX: these are noops for dpm but are here for backwards compat */
				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
				if (ret)
					DRM_ERROR("failed to create device file for power profile\n");
				ret = device_create_file(rdev->dev, &dev_attr_power_method);
				if (ret)
					DRM_ERROR("failed to create device file for power method\n");
				rdev->pm.sysfs_initialized = true;
			}

			mutex_lock(&rdev->pm.mutex);
			ret = radeon_dpm_late_enable(rdev);
			mutex_unlock(&rdev->pm.mutex);
			if (ret) {
				rdev->pm.dpm_enabled = false;
				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
			} else {
				/* set the dpm state for PX since there won't be
				 * a modeset to call this.
				 */
				radeon_pm_compute_clocks(rdev);
			}
		}
	} else {
		if ((rdev->pm.num_power_states > 1) &&
		    (!rdev->pm.sysfs_initialized)) {
			/* where's the best place to put these? */
			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
			if (ret)
				DRM_ERROR("failed to create device file for power profile\n");
			ret = device_create_file(rdev->dev, &dev_attr_power_method);
			if (ret)
				DRM_ERROR("failed to create device file for power method\n");
			if (!ret)
				rdev->pm.sysfs_initialized = true;
		}
	}
	return ret;
}

static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
	}

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}

static void radeon_pm_fini_dpm(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		mutex_unlock(&rdev->pm.mutex);

		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
		/* XXX backwards compat */
		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
	}
	radeon_dpm_fini(rdev);

	radeon_hwmon_fini(rdev);
	kfree(rdev->pm.power_state);
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_fini_dpm(rdev);
	else
		radeon_pm_fini_old(rdev);
}

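/* Legacy reclocking: recount the active CRTCs, then either re-apply the
 * current profile or drive the dynpm state machine (pause with multiple
 * heads, run the idle worker with one, drop to minimum with none).
 */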
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (!rdev->pm.dpm_enabled)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* update active crtc counts */
	rdev->pm.dpm.new_active_crtcs = 0;
	rdev->pm.dpm.new_active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (crtc->enabled) {
				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.dpm.new_active_crtc_count++;
			}
		}
	}

	/* update battery/ac status */
	if (power_supply_is_system_supplied() > 0)
		rdev->pm.dpm.ac_power = true;
	else
		rdev->pm.dpm.ac_power = false;

	radeon_dpm_change_power_state_locked(rdev);

	mutex_unlock(&rdev->pm.mutex);
}

void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_compute_clocks_dpm(rdev);
	else
		radeon_pm_compute_clocks_old(rdev);
}

static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtcs. All crtcs must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
								crtc,
								USE_REAL_VBLANKSTART,
								&vpos, &hpos, NULL, NULL,
								&rdev->mode_info.crtcs[crtc]->base.hwmode);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
				in_vbl = false;
		}
	}

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (!in_vbl)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
				 finish ? "exit" : "entry");
	return in_vbl;
}

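/* dynpm idle worker: sample how much work the rings still have
 * outstanding and plan an upclock (three or more emitted fences) or a
 * downclock (fully idle); the planned action is applied once the
 * reclock delay expires, and the worker then re-arms itself.
 */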
"exit" : "entry"); 1795 return in_vbl; 1796 } 1797 1798 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 1799 { 1800 struct radeon_device *rdev; 1801 int resched; 1802 rdev = container_of(work, struct radeon_device, 1803 pm.dynpm_idle_work.work); 1804 1805 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1806 mutex_lock(&rdev->pm.mutex); 1807 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1808 int not_processed = 0; 1809 int i; 1810 1811 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1812 struct radeon_ring *ring = &rdev->ring[i]; 1813 1814 if (ring->ready) { 1815 not_processed += radeon_fence_count_emitted(rdev, i); 1816 if (not_processed >= 3) 1817 break; 1818 } 1819 } 1820 1821 if (not_processed >= 3) { /* should upclock */ 1822 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 1823 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1824 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1825 rdev->pm.dynpm_can_upclock) { 1826 rdev->pm.dynpm_planned_action = 1827 DYNPM_ACTION_UPCLOCK; 1828 rdev->pm.dynpm_action_timeout = jiffies + 1829 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1830 } 1831 } else if (not_processed == 0) { /* should downclock */ 1832 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 1833 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1834 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1835 rdev->pm.dynpm_can_downclock) { 1836 rdev->pm.dynpm_planned_action = 1837 DYNPM_ACTION_DOWNCLOCK; 1838 rdev->pm.dynpm_action_timeout = jiffies + 1839 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1840 } 1841 } 1842 1843 /* Note, radeon_pm_set_clocks is called with static_switch set 1844 * to false since we want to wait for vbl to avoid flicker. 1845 */ 1846 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 1847 jiffies > rdev->pm.dynpm_action_timeout) { 1848 radeon_pm_get_dynpm_state(rdev); 1849 radeon_pm_set_clocks(rdev); 1850 } 1851 1852 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1853 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1854 } 1855 mutex_unlock(&rdev->pm.mutex); 1856 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1857 } 1858 1859 /* 1860 * Debugfs info 1861 */ 1862 #if defined(CONFIG_DEBUG_FS) 1863 1864 static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 1865 { 1866 struct drm_info_node *node = (struct drm_info_node *) m->private; 1867 struct drm_device *dev = node->minor->dev; 1868 struct radeon_device *rdev = dev->dev_private; 1869 struct drm_device *ddev = rdev->ddev; 1870 1871 if ((rdev->flags & RADEON_IS_PX) && 1872 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1873 seq_printf(m, "PX asic powered off\n"); 1874 } else if (rdev->pm.dpm_enabled) { 1875 mutex_lock(&rdev->pm.mutex); 1876 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1877 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1878 else 1879 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1880 mutex_unlock(&rdev->pm.mutex); 1881 } else { 1882 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1883 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ 1884 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1885 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1886 else 1887 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 1888 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1889 if 
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}