1 /* 2 * Permission is hereby granted, free of charge, to any person obtaining a 3 * copy of this software and associated documentation files (the "Software"), 4 * to deal in the Software without restriction, including without limitation 5 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 6 * and/or sell copies of the Software, and to permit persons to whom the 7 * Software is furnished to do so, subject to the following conditions: 8 * 9 * The above copyright notice and this permission notice shall be included in 10 * all copies or substantial portions of the Software. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 18 * OTHER DEALINGS IN THE SOFTWARE. 19 * 20 * Authors: Rafał Miłecki <zajec5@gmail.com> 21 * Alex Deucher <alexdeucher@gmail.com> 22 */ 23 #include <drm/drmP.h> 24 #include "radeon.h" 25 #include "avivod.h" 26 #include "atom.h" 27 #include <linux/power_supply.h> 28 #include <linux/hwmon.h> 29 #include <linux/hwmon-sysfs.h> 30 31 #define RADEON_IDLE_LOOP_MS 100 32 #define RADEON_RECLOCK_DELAY_MS 200 33 #define RADEON_WAIT_VBLANK_TIMEOUT 200 34 35 static const char *radeon_pm_state_type_name[5] = { 36 "", 37 "Powersave", 38 "Battery", 39 "Balanced", 40 "Performance", 41 }; 42 43 static void radeon_dynpm_idle_work_handler(struct work_struct *work); 44 static int radeon_debugfs_pm_init(struct radeon_device *rdev); 45 static bool radeon_pm_in_vbl(struct radeon_device *rdev); 46 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); 47 static void radeon_pm_update_profile(struct radeon_device *rdev); 48 static void radeon_pm_set_clocks(struct radeon_device *rdev); 49 50 int radeon_pm_get_type_index(struct radeon_device *rdev, 51 enum radeon_pm_state_type ps_type, 52 int instance) 53 { 54 int i; 55 int found_instance = -1; 56 57 for (i = 0; i < rdev->pm.num_power_states; i++) { 58 if (rdev->pm.power_state[i].type == ps_type) { 59 found_instance++; 60 if (found_instance == instance) 61 return i; 62 } 63 } 64 /* return default if no match */ 65 return rdev->pm.default_power_state_index; 66 } 67 68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 69 { 70 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 71 mutex_lock(&rdev->pm.mutex); 72 if (power_supply_is_system_supplied() > 0) 73 rdev->pm.dpm.ac_power = true; 74 else 75 rdev->pm.dpm.ac_power = false; 76 if (rdev->asic->dpm.enable_bapm) 77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); 78 mutex_unlock(&rdev->pm.mutex); 79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 80 if (rdev->pm.profile == PM_PROFILE_AUTO) { 81 mutex_lock(&rdev->pm.mutex); 82 radeon_pm_update_profile(rdev); 83 radeon_pm_set_clocks(rdev); 84 mutex_unlock(&rdev->pm.mutex); 85 } 86 } 87 } 88 89 static void radeon_pm_update_profile(struct radeon_device *rdev) 90 { 91 switch (rdev->pm.profile) { 92 case PM_PROFILE_DEFAULT: 93 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; 94 break; 95 case PM_PROFILE_AUTO: 96 if (power_supply_is_system_supplied() > 0) { 97 if (rdev->pm.active_crtc_count > 1) 98 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; 99 else 100 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; 101 } else { 102 if (rdev->pm.active_crtc_count > 1) 103 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; 104 else 105 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; 106 } 107 break; 108 case PM_PROFILE_LOW: 109 if (rdev->pm.active_crtc_count > 1) 110 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; 111 else 112 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; 113 break; 114 case PM_PROFILE_MID: 115 if (rdev->pm.active_crtc_count > 1) 116 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; 117 else 118 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; 119 break; 120 case PM_PROFILE_HIGH: 121 if (rdev->pm.active_crtc_count > 1) 122 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; 123 else 124 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; 125 break; 126 } 127 128 if (rdev->pm.active_crtc_count == 0) { 129 rdev->pm.requested_power_state_index = 130 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; 131 rdev->pm.requested_clock_mode_index = 132 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; 133 } else { 134 rdev->pm.requested_power_state_index = 135 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; 136 rdev->pm.requested_clock_mode_index = 137 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; 138 } 139 } 140 141 static void radeon_unmap_vram_bos(struct radeon_device *rdev) 142 { 143 struct radeon_bo *bo, *n; 144 145 if (list_empty(&rdev->gem.objects)) 146 return; 147 148 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 149 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 150 ttm_bo_unmap_virtual(&bo->tbo); 151 } 152 } 153 154 static void radeon_sync_with_vblank(struct radeon_device *rdev) 155 { 156 if (rdev->pm.active_crtcs) { 157 rdev->pm.vblank_sync = false; 158 wait_event_timeout( 159 rdev->irq.vblank_queue, rdev->pm.vblank_sync, 160 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); 161 } 162 } 163 164 static void radeon_set_power_state(struct radeon_device *rdev) 165 { 166 u32 sclk, mclk; 167 bool misc_after = false; 168 169 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 170 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 171 return; 172 173 if (radeon_gui_idle(rdev)) { 174 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 175 clock_info[rdev->pm.requested_clock_mode_index].sclk; 176 if (sclk > rdev->pm.default_sclk) 177 sclk = rdev->pm.default_sclk; 178 179 /* starting with BTC, there is one state that is used for both 180 * MH and SH. Difference is that we always use the high clock index for 181 * mclk and vddci. 182 */ 183 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && 184 (rdev->family >= CHIP_BARTS) && 185 rdev->pm.active_crtc_count && 186 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) || 187 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX))) 188 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 189 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk; 190 else 191 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 192 clock_info[rdev->pm.requested_clock_mode_index].mclk; 193 194 if (mclk > rdev->pm.default_mclk) 195 mclk = rdev->pm.default_mclk; 196 197 /* upvolt before raising clocks, downvolt after lowering clocks */ 198 if (sclk < rdev->pm.current_sclk) 199 misc_after = true; 200 201 radeon_sync_with_vblank(rdev); 202 203 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 204 if (!radeon_pm_in_vbl(rdev)) 205 return; 206 } 207 208 radeon_pm_prepare(rdev); 209 210 if (!misc_after) 211 /* voltage, pcie lanes, etc.*/ 212 radeon_pm_misc(rdev); 213 214 /* set engine clock */ 215 if (sclk != rdev->pm.current_sclk) { 216 radeon_pm_debug_check_in_vbl(rdev, false); 217 radeon_set_engine_clock(rdev, sclk); 218 radeon_pm_debug_check_in_vbl(rdev, true); 219 rdev->pm.current_sclk = sclk; 220 DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk); 221 } 222 223 /* set memory clock */ 224 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) { 225 radeon_pm_debug_check_in_vbl(rdev, false); 226 radeon_set_memory_clock(rdev, mclk); 227 radeon_pm_debug_check_in_vbl(rdev, true); 228 rdev->pm.current_mclk = mclk; 229 DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk); 230 } 231 232 if (misc_after) 233 /* voltage, pcie lanes, etc.*/ 234 radeon_pm_misc(rdev); 235 236 radeon_pm_finish(rdev); 237 238 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; 239 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; 240 } else 241 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n"); 242 } 243 244 static void radeon_pm_set_clocks(struct radeon_device *rdev) 245 { 246 int i, r; 247 248 /* no need to take locks, etc. if nothing's going to change */ 249 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 250 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 251 return; 252 253 mutex_lock(&rdev->ddev->struct_mutex); 254 down_write(&rdev->pm.mclk_lock); 255 mutex_lock(&rdev->ring_lock); 256 257 /* wait for the rings to drain */ 258 for (i = 0; i < RADEON_NUM_RINGS; i++) { 259 struct radeon_ring *ring = &rdev->ring[i]; 260 if (!ring->ready) { 261 continue; 262 } 263 r = radeon_fence_wait_empty_locked(rdev, i); 264 if (r) { 265 /* needs a GPU reset dont reset here */ 266 mutex_unlock(&rdev->ring_lock); 267 up_write(&rdev->pm.mclk_lock); 268 mutex_unlock(&rdev->ddev->struct_mutex); 269 return; 270 } 271 } 272 273 radeon_unmap_vram_bos(rdev); 274 275 if (rdev->irq.installed) { 276 for (i = 0; i < rdev->num_crtc; i++) { 277 if (rdev->pm.active_crtcs & (1 << i)) { 278 rdev->pm.req_vblank |= (1 << i); 279 drm_vblank_get(rdev->ddev, i); 280 } 281 } 282 } 283 284 radeon_set_power_state(rdev); 285 286 if (rdev->irq.installed) { 287 for (i = 0; i < rdev->num_crtc; i++) { 288 if (rdev->pm.req_vblank & (1 << i)) { 289 rdev->pm.req_vblank &= ~(1 << i); 290 drm_vblank_put(rdev->ddev, i); 291 } 292 } 293 } 294 295 /* update display watermarks based on new power state */ 296 radeon_update_bandwidth_info(rdev); 297 if (rdev->pm.active_crtc_count) 298 radeon_bandwidth_update(rdev); 299 300 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 301 302 mutex_unlock(&rdev->ring_lock); 303 up_write(&rdev->pm.mclk_lock); 304 mutex_unlock(&rdev->ddev->struct_mutex); 305 } 306 307 static void radeon_pm_print_states(struct radeon_device *rdev) 308 { 309 int i, j; 310 struct radeon_power_state *power_state; 311 struct radeon_pm_clock_info *clock_info; 312 313 DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states); 314 for (i = 0; i < rdev->pm.num_power_states; i++) { 315 power_state = &rdev->pm.power_state[i]; 316 DRM_DEBUG_DRIVER("State %d: %s\n", i, 317 radeon_pm_state_type_name[power_state->type]); 318 if (i == rdev->pm.default_power_state_index) 319 DRM_DEBUG_DRIVER("\tDefault"); 320 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) 321 DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes); 322 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 323 DRM_DEBUG_DRIVER("\tSingle display only\n"); 324 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes); 325 for (j = 0; j < power_state->num_clock_modes; j++) { 326 clock_info = &(power_state->clock_info[j]); 327 if (rdev->flags & RADEON_IS_IGP) 328 DRM_DEBUG_DRIVER("\t\t%d e: %d\n", 329 j, 330 clock_info->sclk * 10); 331 else 332 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n", 333 j, 334 clock_info->sclk * 10, 335 clock_info->mclk * 10, 336 clock_info->voltage.voltage); 337 } 338 } 339 } 340 341 static ssize_t radeon_get_pm_profile(struct device *dev, 342 struct device_attribute *attr, 343 char *buf) 344 { 345 struct drm_device *ddev = dev_get_drvdata(dev); 346 struct radeon_device *rdev = ddev->dev_private; 347 int cp = rdev->pm.profile; 348 349 return snprintf(buf, PAGE_SIZE, "%s\n", 350 (cp == PM_PROFILE_AUTO) ? "auto" : 351 (cp == PM_PROFILE_LOW) ? "low" : 352 (cp == PM_PROFILE_MID) ? "mid" : 353 (cp == PM_PROFILE_HIGH) ? "high" : "default"); 354 } 355 356 static ssize_t radeon_set_pm_profile(struct device *dev, 357 struct device_attribute *attr, 358 const char *buf, 359 size_t count) 360 { 361 struct drm_device *ddev = dev_get_drvdata(dev); 362 struct radeon_device *rdev = ddev->dev_private; 363 364 mutex_lock(&rdev->pm.mutex); 365 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 366 if (strncmp("default", buf, strlen("default")) == 0) 367 rdev->pm.profile = PM_PROFILE_DEFAULT; 368 else if (strncmp("auto", buf, strlen("auto")) == 0) 369 rdev->pm.profile = PM_PROFILE_AUTO; 370 else if (strncmp("low", buf, strlen("low")) == 0) 371 rdev->pm.profile = PM_PROFILE_LOW; 372 else if (strncmp("mid", buf, strlen("mid")) == 0) 373 rdev->pm.profile = PM_PROFILE_MID; 374 else if (strncmp("high", buf, strlen("high")) == 0) 375 rdev->pm.profile = PM_PROFILE_HIGH; 376 else { 377 count = -EINVAL; 378 goto fail; 379 } 380 radeon_pm_update_profile(rdev); 381 radeon_pm_set_clocks(rdev); 382 } else 383 count = -EINVAL; 384 385 fail: 386 mutex_unlock(&rdev->pm.mutex); 387 388 return count; 389 } 390 391 static ssize_t radeon_get_pm_method(struct device *dev, 392 struct device_attribute *attr, 393 char *buf) 394 { 395 struct drm_device *ddev = dev_get_drvdata(dev); 396 struct radeon_device *rdev = ddev->dev_private; 397 int pm = rdev->pm.pm_method; 398 399 return snprintf(buf, PAGE_SIZE, "%s\n", 400 (pm == PM_METHOD_DYNPM) ? "dynpm" : 401 (pm == PM_METHOD_PROFILE) ? "profile" : "dpm"); 402 } 403 404 static ssize_t radeon_set_pm_method(struct device *dev, 405 struct device_attribute *attr, 406 const char *buf, 407 size_t count) 408 { 409 struct drm_device *ddev = dev_get_drvdata(dev); 410 struct radeon_device *rdev = ddev->dev_private; 411 412 /* we don't support the legacy modes with dpm */ 413 if (rdev->pm.pm_method == PM_METHOD_DPM) { 414 count = -EINVAL; 415 goto fail; 416 } 417 418 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 419 mutex_lock(&rdev->pm.mutex); 420 rdev->pm.pm_method = PM_METHOD_DYNPM; 421 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 422 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 423 mutex_unlock(&rdev->pm.mutex); 424 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 425 mutex_lock(&rdev->pm.mutex); 426 /* disable dynpm */ 427 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 428 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 429 rdev->pm.pm_method = PM_METHOD_PROFILE; 430 mutex_unlock(&rdev->pm.mutex); 431 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 432 } else { 433 count = -EINVAL; 434 goto fail; 435 } 436 radeon_pm_compute_clocks(rdev); 437 fail: 438 return count; 439 } 440 441 static ssize_t radeon_get_dpm_state(struct device *dev, 442 struct device_attribute *attr, 443 char *buf) 444 { 445 struct drm_device *ddev = dev_get_drvdata(dev); 446 struct radeon_device *rdev = ddev->dev_private; 447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 448 449 return snprintf(buf, PAGE_SIZE, "%s\n", 450 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 451 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 452 } 453 454 static ssize_t radeon_set_dpm_state(struct device *dev, 455 struct device_attribute *attr, 456 const char *buf, 457 size_t count) 458 { 459 struct drm_device *ddev = dev_get_drvdata(dev); 460 struct radeon_device *rdev = ddev->dev_private; 461 462 mutex_lock(&rdev->pm.mutex); 463 if (strncmp("battery", buf, strlen("battery")) == 0) 464 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 465 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 466 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 467 else if (strncmp("performance", buf, strlen("performance")) == 0) 468 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 469 else { 470 mutex_unlock(&rdev->pm.mutex); 471 count = -EINVAL; 472 goto fail; 473 } 474 mutex_unlock(&rdev->pm.mutex); 475 radeon_pm_compute_clocks(rdev); 476 fail: 477 return count; 478 } 479 480 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, 481 struct device_attribute *attr, 482 char *buf) 483 { 484 struct drm_device *ddev = dev_get_drvdata(dev); 485 struct radeon_device *rdev = ddev->dev_private; 486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 487 488 return snprintf(buf, PAGE_SIZE, "%s\n", 489 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 490 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 491 } 492 493 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, 494 struct device_attribute *attr, 495 const char *buf, 496 size_t count) 497 { 498 struct drm_device *ddev = dev_get_drvdata(dev); 499 struct radeon_device *rdev = ddev->dev_private; 500 enum radeon_dpm_forced_level level; 501 int ret = 0; 502 503 mutex_lock(&rdev->pm.mutex); 504 if (strncmp("low", buf, strlen("low")) == 0) { 505 level = RADEON_DPM_FORCED_LEVEL_LOW; 506 } else if (strncmp("high", buf, strlen("high")) == 0) { 507 level = RADEON_DPM_FORCED_LEVEL_HIGH; 508 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 509 level = RADEON_DPM_FORCED_LEVEL_AUTO; 510 } else { 511 count = -EINVAL; 512 goto fail; 513 } 514 if (rdev->asic->dpm.force_performance_level) { 515 if (rdev->pm.dpm.thermal_active) { 516 count = -EINVAL; 517 goto fail; 518 } 519 ret = radeon_dpm_force_performance_level(rdev, level); 520 if (ret) 521 count = -EINVAL; 522 } 523 fail: 524 mutex_unlock(&rdev->pm.mutex); 525 526 return count; 527 } 528 529 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 530 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 531 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state); 532 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 533 radeon_get_dpm_forced_performance_level, 534 radeon_set_dpm_forced_performance_level); 535 536 static ssize_t radeon_hwmon_show_temp(struct device *dev, 537 struct device_attribute *attr, 538 char *buf) 539 { 540 struct drm_device *ddev = dev_get_drvdata(dev); 541 struct radeon_device *rdev = ddev->dev_private; 542 int temp; 543 544 if (rdev->asic->pm.get_temperature) 545 temp = radeon_get_temperature(rdev); 546 else 547 temp = 0; 548 549 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 550 } 551 552 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, 553 struct device_attribute *attr, 554 char *buf) 555 { 556 struct drm_device *ddev = dev_get_drvdata(dev); 557 struct radeon_device *rdev = ddev->dev_private; 558 int hyst = to_sensor_dev_attr(attr)->index; 559 int temp; 560 561 if (hyst) 562 temp = rdev->pm.dpm.thermal.min_temp; 563 else 564 temp = rdev->pm.dpm.thermal.max_temp; 565 566 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 567 } 568 569 static ssize_t radeon_hwmon_show_name(struct device *dev, 570 struct device_attribute *attr, 571 char *buf) 572 { 573 return sprintf(buf, "radeon\n"); 574 } 575 576 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 577 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 578 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 579 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 580 581 static struct attribute *hwmon_attributes[] = { 582 &sensor_dev_attr_temp1_input.dev_attr.attr, 583 &sensor_dev_attr_temp1_crit.dev_attr.attr, 584 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 585 &sensor_dev_attr_name.dev_attr.attr, 586 NULL 587 }; 588 589 static umode_t hwmon_attributes_visible(struct kobject *kobj, 590 struct attribute *attr, int index) 591 { 592 struct device *dev = container_of(kobj, struct device, kobj); 593 struct drm_device *ddev = dev_get_drvdata(dev); 594 struct radeon_device *rdev = ddev->dev_private; 595 596 /* Skip limit attributes if DPM is not enabled */ 597 if (rdev->pm.pm_method != PM_METHOD_DPM && 598 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 599 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 600 return 0; 601 602 return attr->mode; 603 } 604 605 static const struct attribute_group hwmon_attrgroup = { 606 .attrs = hwmon_attributes, 607 .is_visible = hwmon_attributes_visible, 608 }; 609 610 static int radeon_hwmon_init(struct radeon_device *rdev) 611 { 612 int err = 0; 613 614 rdev->pm.int_hwmon_dev = NULL; 615 616 switch (rdev->pm.int_thermal_type) { 617 case THERMAL_TYPE_RV6XX: 618 case THERMAL_TYPE_RV770: 619 case THERMAL_TYPE_EVERGREEN: 620 case THERMAL_TYPE_NI: 621 case THERMAL_TYPE_SUMO: 622 case THERMAL_TYPE_SI: 623 case THERMAL_TYPE_CI: 624 case THERMAL_TYPE_KV: 625 if (rdev->asic->pm.get_temperature == NULL) 626 return err; 627 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 628 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 629 err = PTR_ERR(rdev->pm.int_hwmon_dev); 630 dev_err(rdev->dev, 631 "Unable to register hwmon device: %d\n", err); 632 break; 633 } 634 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); 635 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, 636 &hwmon_attrgroup); 637 if (err) { 638 dev_err(rdev->dev, 639 "Unable to create hwmon sysfs file: %d\n", err); 640 hwmon_device_unregister(rdev->dev); 641 } 642 break; 643 default: 644 break; 645 } 646 647 return err; 648 } 649 650 static void radeon_hwmon_fini(struct radeon_device *rdev) 651 { 652 if (rdev->pm.int_hwmon_dev) { 653 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); 654 hwmon_device_unregister(rdev->pm.int_hwmon_dev); 655 } 656 } 657 658 static void radeon_dpm_thermal_work_handler(struct work_struct *work) 659 { 660 struct radeon_device *rdev = 661 container_of(work, struct radeon_device, 662 pm.dpm.thermal.work); 663 /* switch to the thermal state */ 664 enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 665 666 if (!rdev->pm.dpm_enabled) 667 return; 668 669 if (rdev->asic->pm.get_temperature) { 670 int temp = radeon_get_temperature(rdev); 671 672 if (temp < rdev->pm.dpm.thermal.min_temp) 673 /* switch back the user state */ 674 dpm_state = rdev->pm.dpm.user_state; 675 } else { 676 if (rdev->pm.dpm.thermal.high_to_low) 677 /* switch back the user state */ 678 dpm_state = rdev->pm.dpm.user_state; 679 } 680 mutex_lock(&rdev->pm.mutex); 681 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 682 rdev->pm.dpm.thermal_active = true; 683 else 684 rdev->pm.dpm.thermal_active = false; 685 rdev->pm.dpm.state = dpm_state; 686 mutex_unlock(&rdev->pm.mutex); 687 688 radeon_pm_compute_clocks(rdev); 689 } 690 691 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 692 enum radeon_pm_state_type dpm_state) 693 { 694 int i; 695 struct radeon_ps *ps; 696 u32 ui_class; 697 bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? 698 true : false; 699 700 /* check if the vblank period is too short to adjust the mclk */ 701 if (single_display && rdev->asic->dpm.vblank_too_short) { 702 if (radeon_dpm_vblank_too_short(rdev)) 703 single_display = false; 704 } 705 706 /* certain older asics have a separare 3D performance state, 707 * so try that first if the user selected performance 708 */ 709 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 710 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 711 /* balanced states don't exist at the moment */ 712 if (dpm_state == POWER_STATE_TYPE_BALANCED) 713 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 714 715 restart_search: 716 /* Pick the best power state based on current conditions */ 717 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 718 ps = &rdev->pm.dpm.ps[i]; 719 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 720 switch (dpm_state) { 721 /* user states */ 722 case POWER_STATE_TYPE_BATTERY: 723 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 724 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 725 if (single_display) 726 return ps; 727 } else 728 return ps; 729 } 730 break; 731 case POWER_STATE_TYPE_BALANCED: 732 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 733 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 734 if (single_display) 735 return ps; 736 } else 737 return ps; 738 } 739 break; 740 case POWER_STATE_TYPE_PERFORMANCE: 741 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 742 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 743 if (single_display) 744 return ps; 745 } else 746 return ps; 747 } 748 break; 749 /* internal states */ 750 case POWER_STATE_TYPE_INTERNAL_UVD: 751 if (rdev->pm.dpm.uvd_ps) 752 return rdev->pm.dpm.uvd_ps; 753 else 754 break; 755 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 756 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 757 return ps; 758 break; 759 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 760 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 761 return ps; 762 break; 763 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 764 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 765 return ps; 766 break; 767 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 768 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 769 return ps; 770 break; 771 case POWER_STATE_TYPE_INTERNAL_BOOT: 772 return rdev->pm.dpm.boot_ps; 773 case POWER_STATE_TYPE_INTERNAL_THERMAL: 774 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 775 return ps; 776 break; 777 case POWER_STATE_TYPE_INTERNAL_ACPI: 778 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 779 return ps; 780 break; 781 case POWER_STATE_TYPE_INTERNAL_ULV: 782 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 783 return ps; 784 break; 785 case POWER_STATE_TYPE_INTERNAL_3DPERF: 786 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 787 return ps; 788 break; 789 default: 790 break; 791 } 792 } 793 /* use a fallback state if we didn't match */ 794 switch (dpm_state) { 795 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 796 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 797 goto restart_search; 798 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 799 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 800 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 801 if (rdev->pm.dpm.uvd_ps) { 802 return rdev->pm.dpm.uvd_ps; 803 } else { 804 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 805 goto restart_search; 806 } 807 case POWER_STATE_TYPE_INTERNAL_THERMAL: 808 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 809 goto restart_search; 810 case POWER_STATE_TYPE_INTERNAL_ACPI: 811 dpm_state = POWER_STATE_TYPE_BATTERY; 812 goto restart_search; 813 case POWER_STATE_TYPE_BATTERY: 814 case POWER_STATE_TYPE_BALANCED: 815 case POWER_STATE_TYPE_INTERNAL_3DPERF: 816 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 817 goto restart_search; 818 default: 819 break; 820 } 821 822 return NULL; 823 } 824 825 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) 826 { 827 int i; 828 struct radeon_ps *ps; 829 enum radeon_pm_state_type dpm_state; 830 int ret; 831 832 /* if dpm init failed */ 833 if (!rdev->pm.dpm_enabled) 834 return; 835 836 if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) { 837 /* add other state override checks here */ 838 if ((!rdev->pm.dpm.thermal_active) && 839 (!rdev->pm.dpm.uvd_active)) 840 rdev->pm.dpm.state = rdev->pm.dpm.user_state; 841 } 842 dpm_state = rdev->pm.dpm.state; 843 844 ps = radeon_dpm_pick_power_state(rdev, dpm_state); 845 if (ps) 846 rdev->pm.dpm.requested_ps = ps; 847 else 848 return; 849 850 /* no need to reprogram if nothing changed unless we are on BTC+ */ 851 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { 852 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { 853 /* for pre-BTC and APUs if the num crtcs changed but state is the same, 854 * all we need to do is update the display configuration. 855 */ 856 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) { 857 /* update display watermarks based on new power state */ 858 radeon_bandwidth_update(rdev); 859 /* update displays */ 860 radeon_dpm_display_configuration_changed(rdev); 861 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 862 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 863 } 864 return; 865 } else { 866 /* for BTC+ if the num crtcs hasn't changed and state is the same, 867 * nothing to do, if the num crtcs is > 1 and state is the same, 868 * update display configuration. 869 */ 870 if (rdev->pm.dpm.new_active_crtcs == 871 rdev->pm.dpm.current_active_crtcs) { 872 return; 873 } else { 874 if ((rdev->pm.dpm.current_active_crtc_count > 1) && 875 (rdev->pm.dpm.new_active_crtc_count > 1)) { 876 /* update display watermarks based on new power state */ 877 radeon_bandwidth_update(rdev); 878 /* update displays */ 879 radeon_dpm_display_configuration_changed(rdev); 880 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 881 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 882 return; 883 } 884 } 885 } 886 } 887 888 if (radeon_dpm == 1) { 889 printk("switching from power state:\n"); 890 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 891 printk("switching to power state:\n"); 892 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 893 } 894 mutex_lock(&rdev->ddev->struct_mutex); 895 down_write(&rdev->pm.mclk_lock); 896 mutex_lock(&rdev->ring_lock); 897 898 ret = radeon_dpm_pre_set_power_state(rdev); 899 if (ret) 900 goto done; 901 902 /* update display watermarks based on new power state */ 903 radeon_bandwidth_update(rdev); 904 /* update displays */ 905 radeon_dpm_display_configuration_changed(rdev); 906 907 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 908 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 909 910 /* wait for the rings to drain */ 911 for (i = 0; i < RADEON_NUM_RINGS; i++) { 912 struct radeon_ring *ring = &rdev->ring[i]; 913 if (ring->ready) 914 radeon_fence_wait_empty_locked(rdev, i); 915 } 916 917 /* program the new power state */ 918 radeon_dpm_set_power_state(rdev); 919 920 /* update current power state */ 921 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps; 922 923 radeon_dpm_post_set_power_state(rdev); 924 925 if (rdev->asic->dpm.force_performance_level) { 926 if (rdev->pm.dpm.thermal_active) { 927 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 928 /* force low perf level for thermal */ 929 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 930 /* save the user's level */ 931 rdev->pm.dpm.forced_level = level; 932 } else { 933 /* otherwise, user selected level */ 934 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level); 935 } 936 } 937 938 done: 939 mutex_unlock(&rdev->ring_lock); 940 up_write(&rdev->pm.mclk_lock); 941 mutex_unlock(&rdev->ddev->struct_mutex); 942 } 943 944 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) 945 { 946 enum radeon_pm_state_type dpm_state; 947 948 if (rdev->asic->dpm.powergate_uvd) { 949 mutex_lock(&rdev->pm.mutex); 950 /* enable/disable UVD */ 951 radeon_dpm_powergate_uvd(rdev, !enable); 952 mutex_unlock(&rdev->pm.mutex); 953 } else { 954 if (enable) { 955 mutex_lock(&rdev->pm.mutex); 956 rdev->pm.dpm.uvd_active = true; 957 /* disable this for now */ 958 #if 0 959 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 960 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 961 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 962 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 963 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) 964 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 965 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 966 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 967 else 968 #endif 969 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 970 rdev->pm.dpm.state = dpm_state; 971 mutex_unlock(&rdev->pm.mutex); 972 } else { 973 mutex_lock(&rdev->pm.mutex); 974 rdev->pm.dpm.uvd_active = false; 975 mutex_unlock(&rdev->pm.mutex); 976 } 977 978 radeon_pm_compute_clocks(rdev); 979 } 980 } 981 982 static void radeon_pm_suspend_old(struct radeon_device *rdev) 983 { 984 mutex_lock(&rdev->pm.mutex); 985 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 986 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 987 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 988 } 989 mutex_unlock(&rdev->pm.mutex); 990 991 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 992 } 993 994 static void radeon_pm_suspend_dpm(struct radeon_device *rdev) 995 { 996 mutex_lock(&rdev->pm.mutex); 997 /* disable dpm */ 998 radeon_dpm_disable(rdev); 999 /* reset the power state */ 1000 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1001 rdev->pm.dpm_enabled = false; 1002 mutex_unlock(&rdev->pm.mutex); 1003 } 1004 1005 void radeon_pm_suspend(struct radeon_device *rdev) 1006 { 1007 if (rdev->pm.pm_method == PM_METHOD_DPM) 1008 radeon_pm_suspend_dpm(rdev); 1009 else 1010 radeon_pm_suspend_old(rdev); 1011 } 1012 1013 static void radeon_pm_resume_old(struct radeon_device *rdev) 1014 { 1015 /* set up the default clocks if the MC ucode is loaded */ 1016 if ((rdev->family >= CHIP_BARTS) && 1017 (rdev->family <= CHIP_CAYMAN) && 1018 rdev->mc_fw) { 1019 if (rdev->pm.default_vddc) 1020 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1021 SET_VOLTAGE_TYPE_ASIC_VDDC); 1022 if (rdev->pm.default_vddci) 1023 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1024 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1025 if (rdev->pm.default_sclk) 1026 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1027 if (rdev->pm.default_mclk) 1028 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1029 } 1030 /* asic init will reset the default power state */ 1031 mutex_lock(&rdev->pm.mutex); 1032 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 1033 rdev->pm.current_clock_mode_index = 0; 1034 rdev->pm.current_sclk = rdev->pm.default_sclk; 1035 rdev->pm.current_mclk = rdev->pm.default_mclk; 1036 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 1037 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; 1038 if (rdev->pm.pm_method == PM_METHOD_DYNPM 1039 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 1040 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1041 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1042 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1043 } 1044 mutex_unlock(&rdev->pm.mutex); 1045 radeon_pm_compute_clocks(rdev); 1046 } 1047 1048 static void radeon_pm_resume_dpm(struct radeon_device *rdev) 1049 { 1050 int ret; 1051 1052 /* asic init will reset to the boot state */ 1053 mutex_lock(&rdev->pm.mutex); 1054 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1055 radeon_dpm_setup_asic(rdev); 1056 ret = radeon_dpm_enable(rdev); 1057 mutex_unlock(&rdev->pm.mutex); 1058 if (ret) { 1059 DRM_ERROR("radeon: dpm resume failed\n"); 1060 if ((rdev->family >= CHIP_BARTS) && 1061 (rdev->family <= CHIP_CAYMAN) && 1062 rdev->mc_fw) { 1063 if (rdev->pm.default_vddc) 1064 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1065 SET_VOLTAGE_TYPE_ASIC_VDDC); 1066 if (rdev->pm.default_vddci) 1067 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1068 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1069 if (rdev->pm.default_sclk) 1070 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1071 if (rdev->pm.default_mclk) 1072 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1073 } 1074 } else { 1075 rdev->pm.dpm_enabled = true; 1076 radeon_pm_compute_clocks(rdev); 1077 } 1078 } 1079 1080 void radeon_pm_resume(struct radeon_device *rdev) 1081 { 1082 if (rdev->pm.pm_method == PM_METHOD_DPM) 1083 radeon_pm_resume_dpm(rdev); 1084 else 1085 radeon_pm_resume_old(rdev); 1086 } 1087 1088 static int radeon_pm_init_old(struct radeon_device *rdev) 1089 { 1090 int ret; 1091 1092 rdev->pm.profile = PM_PROFILE_DEFAULT; 1093 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1094 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1095 rdev->pm.dynpm_can_upclock = true; 1096 rdev->pm.dynpm_can_downclock = true; 1097 rdev->pm.default_sclk = rdev->clock.default_sclk; 1098 rdev->pm.default_mclk = rdev->clock.default_mclk; 1099 rdev->pm.current_sclk = rdev->clock.default_sclk; 1100 rdev->pm.current_mclk = rdev->clock.default_mclk; 1101 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1102 1103 if (rdev->bios) { 1104 if (rdev->is_atom_bios) 1105 radeon_atombios_get_power_modes(rdev); 1106 else 1107 radeon_combios_get_power_modes(rdev); 1108 radeon_pm_print_states(rdev); 1109 radeon_pm_init_profile(rdev); 1110 /* set up the default clocks if the MC ucode is loaded */ 1111 if ((rdev->family >= CHIP_BARTS) && 1112 (rdev->family <= CHIP_CAYMAN) && 1113 rdev->mc_fw) { 1114 if (rdev->pm.default_vddc) 1115 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1116 SET_VOLTAGE_TYPE_ASIC_VDDC); 1117 if (rdev->pm.default_vddci) 1118 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1119 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1120 if (rdev->pm.default_sclk) 1121 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1122 if (rdev->pm.default_mclk) 1123 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1124 } 1125 } 1126 1127 /* set up the internal thermal sensor if applicable */ 1128 ret = radeon_hwmon_init(rdev); 1129 if (ret) 1130 return ret; 1131 1132 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); 1133 1134 if (rdev->pm.num_power_states > 1) { 1135 /* where's the best place to put these? */ 1136 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1137 if (ret) 1138 DRM_ERROR("failed to create device file for power profile\n"); 1139 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1140 if (ret) 1141 DRM_ERROR("failed to create device file for power method\n"); 1142 1143 if (radeon_debugfs_pm_init(rdev)) { 1144 DRM_ERROR("Failed to register debugfs file for PM!\n"); 1145 } 1146 1147 DRM_INFO("radeon: power management initialized\n"); 1148 } 1149 1150 return 0; 1151 } 1152 1153 static void radeon_dpm_print_power_states(struct radeon_device *rdev) 1154 { 1155 int i; 1156 1157 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 1158 printk("== power state %d ==\n", i); 1159 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]); 1160 } 1161 } 1162 1163 static int radeon_pm_init_dpm(struct radeon_device *rdev) 1164 { 1165 int ret; 1166 1167 /* default to balanced state */ 1168 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1169 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1170 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1171 rdev->pm.default_sclk = rdev->clock.default_sclk; 1172 rdev->pm.default_mclk = rdev->clock.default_mclk; 1173 rdev->pm.current_sclk = rdev->clock.default_sclk; 1174 rdev->pm.current_mclk = rdev->clock.default_mclk; 1175 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1176 1177 if (rdev->bios && rdev->is_atom_bios) 1178 radeon_atombios_get_power_modes(rdev); 1179 else 1180 return -EINVAL; 1181 1182 /* set up the internal thermal sensor if applicable */ 1183 ret = radeon_hwmon_init(rdev); 1184 if (ret) 1185 return ret; 1186 1187 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler); 1188 mutex_lock(&rdev->pm.mutex); 1189 radeon_dpm_init(rdev); 1190 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1191 if (radeon_dpm == 1) 1192 radeon_dpm_print_power_states(rdev); 1193 radeon_dpm_setup_asic(rdev); 1194 ret = radeon_dpm_enable(rdev); 1195 mutex_unlock(&rdev->pm.mutex); 1196 if (ret) { 1197 rdev->pm.dpm_enabled = false; 1198 if ((rdev->family >= CHIP_BARTS) && 1199 (rdev->family <= CHIP_CAYMAN) && 1200 rdev->mc_fw) { 1201 if (rdev->pm.default_vddc) 1202 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1203 SET_VOLTAGE_TYPE_ASIC_VDDC); 1204 if (rdev->pm.default_vddci) 1205 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1206 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1207 if (rdev->pm.default_sclk) 1208 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1209 if (rdev->pm.default_mclk) 1210 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1211 } 1212 DRM_ERROR("radeon: dpm initialization failed\n"); 1213 return ret; 1214 } 1215 rdev->pm.dpm_enabled = true; 1216 radeon_pm_compute_clocks(rdev); 1217 1218 if (rdev->pm.num_power_states > 1) { 1219 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1220 if (ret) 1221 DRM_ERROR("failed to create device file for dpm state\n"); 1222 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1223 if (ret) 1224 DRM_ERROR("failed to create device file for dpm state\n"); 1225 /* XXX: these are noops for dpm but are here for backwards compat */ 1226 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1227 if (ret) 1228 DRM_ERROR("failed to create device file for power profile\n"); 1229 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1230 if (ret) 1231 DRM_ERROR("failed to create device file for power method\n"); 1232 1233 if (radeon_debugfs_pm_init(rdev)) { 1234 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1235 } 1236 1237 DRM_INFO("radeon: dpm initialized\n"); 1238 } 1239 1240 return 0; 1241 } 1242 1243 int radeon_pm_init(struct radeon_device *rdev) 1244 { 1245 /* enable dpm on rv6xx+ */ 1246 switch (rdev->family) { 1247 case CHIP_RV610: 1248 case CHIP_RV630: 1249 case CHIP_RV620: 1250 case CHIP_RV635: 1251 case CHIP_RV670: 1252 case CHIP_RS780: 1253 case CHIP_RS880: 1254 case CHIP_CAYMAN: 1255 case CHIP_BONAIRE: 1256 case CHIP_KABINI: 1257 case CHIP_KAVERI: 1258 case CHIP_HAWAII: 1259 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1260 if (!rdev->rlc_fw) 1261 rdev->pm.pm_method = PM_METHOD_PROFILE; 1262 else if ((rdev->family >= CHIP_RV770) && 1263 (!(rdev->flags & RADEON_IS_IGP)) && 1264 (!rdev->smc_fw)) 1265 rdev->pm.pm_method = PM_METHOD_PROFILE; 1266 else if (radeon_dpm == 1) 1267 rdev->pm.pm_method = PM_METHOD_DPM; 1268 else 1269 rdev->pm.pm_method = PM_METHOD_PROFILE; 1270 break; 1271 case CHIP_RV770: 1272 case CHIP_RV730: 1273 case CHIP_RV710: 1274 case CHIP_RV740: 1275 case CHIP_CEDAR: 1276 case CHIP_REDWOOD: 1277 case CHIP_JUNIPER: 1278 case CHIP_CYPRESS: 1279 case CHIP_HEMLOCK: 1280 case CHIP_PALM: 1281 case CHIP_SUMO: 1282 case CHIP_SUMO2: 1283 case CHIP_BARTS: 1284 case CHIP_TURKS: 1285 case CHIP_CAICOS: 1286 case CHIP_ARUBA: 1287 case CHIP_TAHITI: 1288 case CHIP_PITCAIRN: 1289 case CHIP_VERDE: 1290 case CHIP_OLAND: 1291 case CHIP_HAINAN: 1292 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1293 if (!rdev->rlc_fw) 1294 rdev->pm.pm_method = PM_METHOD_PROFILE; 1295 else if ((rdev->family >= CHIP_RV770) && 1296 (!(rdev->flags & RADEON_IS_IGP)) && 1297 (!rdev->smc_fw)) 1298 rdev->pm.pm_method = PM_METHOD_PROFILE; 1299 else if (radeon_dpm == 0) 1300 rdev->pm.pm_method = PM_METHOD_PROFILE; 1301 else 1302 rdev->pm.pm_method = PM_METHOD_DPM; 1303 break; 1304 default: 1305 /* default to profile method */ 1306 rdev->pm.pm_method = PM_METHOD_PROFILE; 1307 break; 1308 } 1309 1310 if (rdev->pm.pm_method == PM_METHOD_DPM) 1311 return radeon_pm_init_dpm(rdev); 1312 else 1313 return radeon_pm_init_old(rdev); 1314 } 1315 1316 static void radeon_pm_fini_old(struct radeon_device *rdev) 1317 { 1318 if (rdev->pm.num_power_states > 1) { 1319 mutex_lock(&rdev->pm.mutex); 1320 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1321 rdev->pm.profile = PM_PROFILE_DEFAULT; 1322 radeon_pm_update_profile(rdev); 1323 radeon_pm_set_clocks(rdev); 1324 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1325 /* reset default clocks */ 1326 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1327 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1328 radeon_pm_set_clocks(rdev); 1329 } 1330 mutex_unlock(&rdev->pm.mutex); 1331 1332 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1333 1334 device_remove_file(rdev->dev, &dev_attr_power_profile); 1335 device_remove_file(rdev->dev, &dev_attr_power_method); 1336 } 1337 1338 if (rdev->pm.power_state) 1339 kfree(rdev->pm.power_state); 1340 1341 radeon_hwmon_fini(rdev); 1342 } 1343 1344 static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1345 { 1346 if (rdev->pm.num_power_states > 1) { 1347 mutex_lock(&rdev->pm.mutex); 1348 radeon_dpm_disable(rdev); 1349 mutex_unlock(&rdev->pm.mutex); 1350 1351 device_remove_file(rdev->dev, &dev_attr_power_dpm_state); 1352 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1353 /* XXX backwards compat */ 1354 device_remove_file(rdev->dev, &dev_attr_power_profile); 1355 device_remove_file(rdev->dev, &dev_attr_power_method); 1356 } 1357 radeon_dpm_fini(rdev); 1358 1359 if (rdev->pm.power_state) 1360 kfree(rdev->pm.power_state); 1361 1362 radeon_hwmon_fini(rdev); 1363 } 1364 1365 void radeon_pm_fini(struct radeon_device *rdev) 1366 { 1367 if (rdev->pm.pm_method == PM_METHOD_DPM) 1368 radeon_pm_fini_dpm(rdev); 1369 else 1370 radeon_pm_fini_old(rdev); 1371 } 1372 1373 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) 1374 { 1375 struct drm_device *ddev = rdev->ddev; 1376 struct drm_crtc *crtc; 1377 struct radeon_crtc *radeon_crtc; 1378 1379 if (rdev->pm.num_power_states < 2) 1380 return; 1381 1382 mutex_lock(&rdev->pm.mutex); 1383 1384 rdev->pm.active_crtcs = 0; 1385 rdev->pm.active_crtc_count = 0; 1386 list_for_each_entry(crtc, 1387 &ddev->mode_config.crtc_list, head) { 1388 radeon_crtc = to_radeon_crtc(crtc); 1389 if (radeon_crtc->enabled) { 1390 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 1391 rdev->pm.active_crtc_count++; 1392 } 1393 } 1394 1395 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1396 radeon_pm_update_profile(rdev); 1397 radeon_pm_set_clocks(rdev); 1398 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1399 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { 1400 if (rdev->pm.active_crtc_count > 1) { 1401 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1402 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1403 1404 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 1405 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1406 radeon_pm_get_dynpm_state(rdev); 1407 radeon_pm_set_clocks(rdev); 1408 1409 DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); 1410 } 1411 } else if (rdev->pm.active_crtc_count == 1) { 1412 /* TODO: Increase clocks if needed for current mode */ 1413 1414 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { 1415 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1416 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; 1417 radeon_pm_get_dynpm_state(rdev); 1418 radeon_pm_set_clocks(rdev); 1419 1420 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1421 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1422 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { 1423 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1424 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1425 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1426 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); 1427 } 1428 } else { /* count == 0 */ 1429 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { 1430 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1431 1432 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; 1433 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; 1434 radeon_pm_get_dynpm_state(rdev); 1435 radeon_pm_set_clocks(rdev); 1436 } 1437 } 1438 } 1439 } 1440 1441 mutex_unlock(&rdev->pm.mutex); 1442 } 1443 1444 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) 1445 { 1446 struct drm_device *ddev = rdev->ddev; 1447 struct drm_crtc *crtc; 1448 struct radeon_crtc *radeon_crtc; 1449 1450 mutex_lock(&rdev->pm.mutex); 1451 1452 /* update active crtc counts */ 1453 rdev->pm.dpm.new_active_crtcs = 0; 1454 rdev->pm.dpm.new_active_crtc_count = 0; 1455 list_for_each_entry(crtc, 1456 &ddev->mode_config.crtc_list, head) { 1457 radeon_crtc = to_radeon_crtc(crtc); 1458 if (crtc->enabled) { 1459 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); 1460 rdev->pm.dpm.new_active_crtc_count++; 1461 } 1462 } 1463 1464 /* update battery/ac status */ 1465 if (power_supply_is_system_supplied() > 0) 1466 rdev->pm.dpm.ac_power = true; 1467 else 1468 rdev->pm.dpm.ac_power = false; 1469 1470 radeon_dpm_change_power_state_locked(rdev); 1471 1472 mutex_unlock(&rdev->pm.mutex); 1473 1474 } 1475 1476 void radeon_pm_compute_clocks(struct radeon_device *rdev) 1477 { 1478 if (rdev->pm.pm_method == PM_METHOD_DPM) 1479 radeon_pm_compute_clocks_dpm(rdev); 1480 else 1481 radeon_pm_compute_clocks_old(rdev); 1482 } 1483 1484 static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1485 { 1486 int crtc, vpos, hpos, vbl_status; 1487 bool in_vbl = true; 1488 1489 /* Iterate over all active crtc's. All crtc's must be in vblank, 1490 * otherwise return in_vbl == false. 1491 */ 1492 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1493 if (rdev->pm.active_crtcs & (1 << crtc)) { 1494 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL); 1495 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1496 !(vbl_status & DRM_SCANOUTPOS_INVBL)) 1497 in_vbl = false; 1498 } 1499 } 1500 1501 return in_vbl; 1502 } 1503 1504 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 1505 { 1506 u32 stat_crtc = 0; 1507 bool in_vbl = radeon_pm_in_vbl(rdev); 1508 1509 if (in_vbl == false) 1510 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, 1511 finish ? "exit" : "entry"); 1512 return in_vbl; 1513 } 1514 1515 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 1516 { 1517 struct radeon_device *rdev; 1518 int resched; 1519 rdev = container_of(work, struct radeon_device, 1520 pm.dynpm_idle_work.work); 1521 1522 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1523 mutex_lock(&rdev->pm.mutex); 1524 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1525 int not_processed = 0; 1526 int i; 1527 1528 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1529 struct radeon_ring *ring = &rdev->ring[i]; 1530 1531 if (ring->ready) { 1532 not_processed += radeon_fence_count_emitted(rdev, i); 1533 if (not_processed >= 3) 1534 break; 1535 } 1536 } 1537 1538 if (not_processed >= 3) { /* should upclock */ 1539 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 1540 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1541 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1542 rdev->pm.dynpm_can_upclock) { 1543 rdev->pm.dynpm_planned_action = 1544 DYNPM_ACTION_UPCLOCK; 1545 rdev->pm.dynpm_action_timeout = jiffies + 1546 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1547 } 1548 } else if (not_processed == 0) { /* should downclock */ 1549 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 1550 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1551 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1552 rdev->pm.dynpm_can_downclock) { 1553 rdev->pm.dynpm_planned_action = 1554 DYNPM_ACTION_DOWNCLOCK; 1555 rdev->pm.dynpm_action_timeout = jiffies + 1556 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1557 } 1558 } 1559 1560 /* Note, radeon_pm_set_clocks is called with static_switch set 1561 * to false since we want to wait for vbl to avoid flicker. 1562 */ 1563 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 1564 jiffies > rdev->pm.dynpm_action_timeout) { 1565 radeon_pm_get_dynpm_state(rdev); 1566 radeon_pm_set_clocks(rdev); 1567 } 1568 1569 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1570 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1571 } 1572 mutex_unlock(&rdev->pm.mutex); 1573 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1574 } 1575 1576 /* 1577 * Debugfs info 1578 */ 1579 #if defined(CONFIG_DEBUG_FS) 1580 1581 static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 1582 { 1583 struct drm_info_node *node = (struct drm_info_node *) m->private; 1584 struct drm_device *dev = node->minor->dev; 1585 struct radeon_device *rdev = dev->dev_private; 1586 1587 if (rdev->pm.dpm_enabled) { 1588 mutex_lock(&rdev->pm.mutex); 1589 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1590 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1591 else 1592 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1593 mutex_unlock(&rdev->pm.mutex); 1594 } else { 1595 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1596 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ 1597 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1598 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1599 else 1600 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 1601 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1602 if (rdev->asic->pm.get_memory_clock) 1603 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 1604 if (rdev->pm.current_vddc) 1605 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 1606 if (rdev->asic->pm.get_pcie_lanes) 1607 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 1608 } 1609 1610 return 0; 1611 } 1612 1613 static struct drm_info_list radeon_pm_info_list[] = { 1614 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 1615 }; 1616 #endif 1617 1618 static int radeon_debugfs_pm_init(struct radeon_device *rdev) 1619 { 1620 #if defined(CONFIG_DEBUG_FS) 1621 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 1622 #else 1623 return 0; 1624 #endif 1625 } 1626