17433874eSRafał Miłecki /* 27433874eSRafał Miłecki * Permission is hereby granted, free of charge, to any person obtaining a 37433874eSRafał Miłecki * copy of this software and associated documentation files (the "Software"), 47433874eSRafał Miłecki * to deal in the Software without restriction, including without limitation 57433874eSRafał Miłecki * the rights to use, copy, modify, merge, publish, distribute, sublicense, 67433874eSRafał Miłecki * and/or sell copies of the Software, and to permit persons to whom the 77433874eSRafał Miłecki * Software is furnished to do so, subject to the following conditions: 87433874eSRafał Miłecki * 97433874eSRafał Miłecki * The above copyright notice and this permission notice shall be included in 107433874eSRafał Miłecki * all copies or substantial portions of the Software. 117433874eSRafał Miłecki * 127433874eSRafał Miłecki * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 137433874eSRafał Miłecki * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 147433874eSRafał Miłecki * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 157433874eSRafał Miłecki * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 167433874eSRafał Miłecki * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 177433874eSRafał Miłecki * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 187433874eSRafał Miłecki * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

/* polling/reclock intervals for the dynpm idle worker, in milliseconds */
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
/* max time to wait for a vblank before reclocking anyway, in milliseconds */
#define RADEON_WAIT_VBLANK_TIMEOUT 200

/* Human-readable names indexed by power state type (index 0 unused/default) */
static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

/**
 * radeon_pm_get_type_index - find the n-th power state of a given type
 * @rdev: radeon device
 * @ps_type: requested power state type
 * @instance: which occurrence of that type to return (0-based)
 *
 * Scans the power state table for states of @ps_type and returns the index
 * of the @instance-th match.  Falls back to the default power state index
 * when no matching state exists.
 */
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

/**
 * radeon_pm_acpi_event_handler - react to an ACPI (AC/battery) event
 * @rdev: radeon device
 *
 * In profile mode with the "auto" profile selected, re-evaluate the
 * profile (AC vs. battery decides high vs. mid clocks) and reprogram
 * the clocks under the pm mutex.  Other modes/profiles are unaffected.
 */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}

/*
 * Map the user-selected profile (default/auto/low/mid/high) plus the
 * current display configuration (single vs. multi-head, DPMS on/off)
 * onto a concrete profile table index, then derive the requested power
 * state and clock mode indices from that table entry.
 * Caller is expected to hold rdev->pm.mutex (all callers in this file do).
 */
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		/* on AC power use the high profile, on battery use mid */
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	/* pick the DPMS-off entry when no CRTC is active, DPMS-on otherwise */
	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

/*
 * Tear down CPU mappings of all GEM BOs currently placed in VRAM,
 * so nothing touches VRAM through stale mappings while the memory
 * clock is being reprogrammed.
 */
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}
/*
 * If any CRTC is active, wait (bounded by RADEON_WAIT_VBLANK_TIMEOUT)
 * for the vblank interrupt handler to set pm.vblank_sync, so the
 * following reclock happens near a vblank.  Best effort: a timeout
 * simply proceeds without synchronization.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}

/*
 * Program the hardware to the requested power state / clock mode.
 * Clamps sclk/mclk to the defaults, orders the voltage change relative
 * to the clock change (raise voltage before raising clocks, lower it
 * after lowering them), and only proceeds when the GUI block is idle.
 * Updates pm.current_* bookkeeping on success.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	/* nothing to do if we are already in the requested state */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk and vddci.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		/* dynpm only reclocks while actually inside a vblank */
		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

/*
 * Quiesce the GPU and apply the requested power state.
 * Lock order (must not be changed): ddev->struct_mutex, then the
 * mclk_lock rwsem for writing, then ring_lock.  All rings are drained
 * first; a fence-wait failure means the GPU needs a reset, so we back
 * out without reclocking.  VRAM mappings are torn down and vblank
 * references taken around the actual state switch, then display
 * watermarks are recomputed for the new state.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready) {
			continue;
		}
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* needs a GPU reset dont reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			mutex_unlock(&rdev->ddev->struct_mutex);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	/* hold a vblank reference on every active CRTC across the switch */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	/* drop exactly the vblank references we took above */
	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
/*
 * Dump the whole power state table through DRM_DEBUG_DRIVER:
 * one entry per state (type, default marker, PCIE lane count on
 * discrete PCIE parts, single-display flag) followed by each clock
 * mode.  The *10 scaling suggests sclk/mclk are stored in 10 kHz
 * units — NOTE(review): confirm against the atom/combios parsers.
 */
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			/* IGPs have no discrete memory clock or voltage to report */
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

/*
 * sysfs "power_profile" show handler: prints the current profile as
 * one of "auto", "low", "mid", "high" or "default" (the fallback for
 * PM_PROFILE_DEFAULT and any unrecognized value).
 */
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}
"high" : "default"); 345a424816fSAlex Deucher } 346a424816fSAlex Deucher 347ce8f5370SAlex Deucher static ssize_t radeon_set_pm_profile(struct device *dev, 348a424816fSAlex Deucher struct device_attribute *attr, 349a424816fSAlex Deucher const char *buf, 350a424816fSAlex Deucher size_t count) 351a424816fSAlex Deucher { 352a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 353a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 354a424816fSAlex Deucher 355a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 356ce8f5370SAlex Deucher if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 357ce8f5370SAlex Deucher if (strncmp("default", buf, strlen("default")) == 0) 358ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_DEFAULT; 359ce8f5370SAlex Deucher else if (strncmp("auto", buf, strlen("auto")) == 0) 360ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_AUTO; 361ce8f5370SAlex Deucher else if (strncmp("low", buf, strlen("low")) == 0) 362ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_LOW; 363c9e75b21SAlex Deucher else if (strncmp("mid", buf, strlen("mid")) == 0) 364c9e75b21SAlex Deucher rdev->pm.profile = PM_PROFILE_MID; 365ce8f5370SAlex Deucher else if (strncmp("high", buf, strlen("high")) == 0) 366ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_HIGH; 367ce8f5370SAlex Deucher else { 3681783e4bfSThomas Renninger count = -EINVAL; 369ce8f5370SAlex Deucher goto fail; 370ce8f5370SAlex Deucher } 371ce8f5370SAlex Deucher radeon_pm_update_profile(rdev); 372ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 3731783e4bfSThomas Renninger } else 3741783e4bfSThomas Renninger count = -EINVAL; 3751783e4bfSThomas Renninger 376ce8f5370SAlex Deucher fail: 377a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 378a424816fSAlex Deucher 379a424816fSAlex Deucher return count; 380a424816fSAlex Deucher } 381a424816fSAlex Deucher 382ce8f5370SAlex Deucher static ssize_t radeon_get_pm_method(struct device *dev, 383ce8f5370SAlex Deucher struct 
device_attribute *attr, 384ce8f5370SAlex Deucher char *buf) 38556278a8eSAlex Deucher { 386ce8f5370SAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 387ce8f5370SAlex Deucher struct radeon_device *rdev = ddev->dev_private; 388ce8f5370SAlex Deucher int pm = rdev->pm.pm_method; 38956278a8eSAlex Deucher 390ce8f5370SAlex Deucher return snprintf(buf, PAGE_SIZE, "%s\n", 391*da321c8aSAlex Deucher (pm == PM_METHOD_DYNPM) ? "dynpm" : 392*da321c8aSAlex Deucher (pm == PM_METHOD_PROFILE) ? "profile" : "dpm"); 39356278a8eSAlex Deucher } 39456278a8eSAlex Deucher 395ce8f5370SAlex Deucher static ssize_t radeon_set_pm_method(struct device *dev, 396ce8f5370SAlex Deucher struct device_attribute *attr, 397ce8f5370SAlex Deucher const char *buf, 398ce8f5370SAlex Deucher size_t count) 399d0d6cb81SRafał Miłecki { 400ce8f5370SAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 401ce8f5370SAlex Deucher struct radeon_device *rdev = ddev->dev_private; 402ce8f5370SAlex Deucher 403*da321c8aSAlex Deucher /* we don't support the legacy modes with dpm */ 404*da321c8aSAlex Deucher if (rdev->pm.pm_method == PM_METHOD_DPM) { 405*da321c8aSAlex Deucher count = -EINVAL; 406*da321c8aSAlex Deucher goto fail; 407*da321c8aSAlex Deucher } 408ce8f5370SAlex Deucher 409ce8f5370SAlex Deucher if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 410ce8f5370SAlex Deucher mutex_lock(&rdev->pm.mutex); 411ce8f5370SAlex Deucher rdev->pm.pm_method = PM_METHOD_DYNPM; 412ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 413ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 414ce8f5370SAlex Deucher mutex_unlock(&rdev->pm.mutex); 415ce8f5370SAlex Deucher } else if (strncmp("profile", buf, strlen("profile")) == 0) { 416ce8f5370SAlex Deucher mutex_lock(&rdev->pm.mutex); 417ce8f5370SAlex Deucher /* disable dynpm */ 418ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 419ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = 
DYNPM_ACTION_NONE; 4203f53eb6fSRafael J. Wysocki rdev->pm.pm_method = PM_METHOD_PROFILE; 421ce8f5370SAlex Deucher mutex_unlock(&rdev->pm.mutex); 42232c87fcaSTejun Heo cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 423ce8f5370SAlex Deucher } else { 4241783e4bfSThomas Renninger count = -EINVAL; 425ce8f5370SAlex Deucher goto fail; 426d0d6cb81SRafał Miłecki } 427ce8f5370SAlex Deucher radeon_pm_compute_clocks(rdev); 428ce8f5370SAlex Deucher fail: 429ce8f5370SAlex Deucher return count; 430ce8f5370SAlex Deucher } 431ce8f5370SAlex Deucher 432*da321c8aSAlex Deucher static ssize_t radeon_get_dpm_state(struct device *dev, 433*da321c8aSAlex Deucher struct device_attribute *attr, 434*da321c8aSAlex Deucher char *buf) 435*da321c8aSAlex Deucher { 436*da321c8aSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 437*da321c8aSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 438*da321c8aSAlex Deucher enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 439*da321c8aSAlex Deucher 440*da321c8aSAlex Deucher return snprintf(buf, PAGE_SIZE, "%s\n", 441*da321c8aSAlex Deucher (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 442*da321c8aSAlex Deucher (pm == POWER_STATE_TYPE_BALANCED) ? 
"balanced" : "performance"); 443*da321c8aSAlex Deucher } 444*da321c8aSAlex Deucher 445*da321c8aSAlex Deucher static ssize_t radeon_set_dpm_state(struct device *dev, 446*da321c8aSAlex Deucher struct device_attribute *attr, 447*da321c8aSAlex Deucher const char *buf, 448*da321c8aSAlex Deucher size_t count) 449*da321c8aSAlex Deucher { 450*da321c8aSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 451*da321c8aSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 452*da321c8aSAlex Deucher 453*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 454*da321c8aSAlex Deucher if (strncmp("battery", buf, strlen("battery")) == 0) 455*da321c8aSAlex Deucher rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 456*da321c8aSAlex Deucher else if (strncmp("balanced", buf, strlen("balanced")) == 0) 457*da321c8aSAlex Deucher rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 458*da321c8aSAlex Deucher else if (strncmp("performance", buf, strlen("performance")) == 0) 459*da321c8aSAlex Deucher rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 460*da321c8aSAlex Deucher else { 461*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 462*da321c8aSAlex Deucher count = -EINVAL; 463*da321c8aSAlex Deucher goto fail; 464*da321c8aSAlex Deucher } 465*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 466*da321c8aSAlex Deucher radeon_pm_compute_clocks(rdev); 467*da321c8aSAlex Deucher fail: 468*da321c8aSAlex Deucher return count; 469*da321c8aSAlex Deucher } 470*da321c8aSAlex Deucher 471ce8f5370SAlex Deucher static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 472ce8f5370SAlex Deucher static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 473*da321c8aSAlex Deucher static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state); 474ce8f5370SAlex Deucher 47521a8122aSAlex Deucher static ssize_t radeon_hwmon_show_temp(struct device *dev, 
47621a8122aSAlex Deucher struct device_attribute *attr, 47721a8122aSAlex Deucher char *buf) 47821a8122aSAlex Deucher { 47921a8122aSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 48021a8122aSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 48120d391d7SAlex Deucher int temp; 48221a8122aSAlex Deucher 4836bd1c385SAlex Deucher if (rdev->asic->pm.get_temperature) 4846bd1c385SAlex Deucher temp = radeon_get_temperature(rdev); 4856bd1c385SAlex Deucher else 48621a8122aSAlex Deucher temp = 0; 48721a8122aSAlex Deucher 48821a8122aSAlex Deucher return snprintf(buf, PAGE_SIZE, "%d\n", temp); 48921a8122aSAlex Deucher } 49021a8122aSAlex Deucher 49121a8122aSAlex Deucher static ssize_t radeon_hwmon_show_name(struct device *dev, 49221a8122aSAlex Deucher struct device_attribute *attr, 49321a8122aSAlex Deucher char *buf) 49421a8122aSAlex Deucher { 49521a8122aSAlex Deucher return sprintf(buf, "radeon\n"); 49621a8122aSAlex Deucher } 49721a8122aSAlex Deucher 49821a8122aSAlex Deucher static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 49921a8122aSAlex Deucher static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 50021a8122aSAlex Deucher 50121a8122aSAlex Deucher static struct attribute *hwmon_attributes[] = { 50221a8122aSAlex Deucher &sensor_dev_attr_temp1_input.dev_attr.attr, 50321a8122aSAlex Deucher &sensor_dev_attr_name.dev_attr.attr, 50421a8122aSAlex Deucher NULL 50521a8122aSAlex Deucher }; 50621a8122aSAlex Deucher 50721a8122aSAlex Deucher static const struct attribute_group hwmon_attrgroup = { 50821a8122aSAlex Deucher .attrs = hwmon_attributes, 50921a8122aSAlex Deucher }; 51021a8122aSAlex Deucher 5110d18abedSDan Carpenter static int radeon_hwmon_init(struct radeon_device *rdev) 51221a8122aSAlex Deucher { 5130d18abedSDan Carpenter int err = 0; 51421a8122aSAlex Deucher 51521a8122aSAlex Deucher rdev->pm.int_hwmon_dev = NULL; 51621a8122aSAlex Deucher 51721a8122aSAlex Deucher switch 
(rdev->pm.int_thermal_type) { 51821a8122aSAlex Deucher case THERMAL_TYPE_RV6XX: 51921a8122aSAlex Deucher case THERMAL_TYPE_RV770: 52021a8122aSAlex Deucher case THERMAL_TYPE_EVERGREEN: 521457558edSAlex Deucher case THERMAL_TYPE_NI: 522e33df25fSAlex Deucher case THERMAL_TYPE_SUMO: 5231bd47d2eSAlex Deucher case THERMAL_TYPE_SI: 5246bd1c385SAlex Deucher if (rdev->asic->pm.get_temperature == NULL) 5255d7486c7SAlex Deucher return err; 52621a8122aSAlex Deucher rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 5270d18abedSDan Carpenter if (IS_ERR(rdev->pm.int_hwmon_dev)) { 5280d18abedSDan Carpenter err = PTR_ERR(rdev->pm.int_hwmon_dev); 5290d18abedSDan Carpenter dev_err(rdev->dev, 5300d18abedSDan Carpenter "Unable to register hwmon device: %d\n", err); 5310d18abedSDan Carpenter break; 5320d18abedSDan Carpenter } 53321a8122aSAlex Deucher dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); 53421a8122aSAlex Deucher err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, 53521a8122aSAlex Deucher &hwmon_attrgroup); 5360d18abedSDan Carpenter if (err) { 5370d18abedSDan Carpenter dev_err(rdev->dev, 5380d18abedSDan Carpenter "Unable to create hwmon sysfs file: %d\n", err); 5390d18abedSDan Carpenter hwmon_device_unregister(rdev->dev); 5400d18abedSDan Carpenter } 54121a8122aSAlex Deucher break; 54221a8122aSAlex Deucher default: 54321a8122aSAlex Deucher break; 54421a8122aSAlex Deucher } 5450d18abedSDan Carpenter 5460d18abedSDan Carpenter return err; 54721a8122aSAlex Deucher } 54821a8122aSAlex Deucher 54921a8122aSAlex Deucher static void radeon_hwmon_fini(struct radeon_device *rdev) 55021a8122aSAlex Deucher { 55121a8122aSAlex Deucher if (rdev->pm.int_hwmon_dev) { 55221a8122aSAlex Deucher sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); 55321a8122aSAlex Deucher hwmon_device_unregister(rdev->pm.int_hwmon_dev); 55421a8122aSAlex Deucher } 55521a8122aSAlex Deucher } 55621a8122aSAlex Deucher 557*da321c8aSAlex Deucher static void 
/*
 * Worker run when a thermal interrupt fires: switch into the internal
 * thermal power state, or back to the user state once the temperature
 * (when readable) has dropped below thermal.min_temp, or on a
 * high-to-low thermal transition when no sensor read is available.
 */
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->asic->pm.get_temperature) {
		int temp = radeon_get_temperature(rdev);

		if (temp < rdev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	} else {
		if (rdev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = rdev->pm.dpm.user_state;
	}
	radeon_dpm_enable_power_state(rdev, dpm_state);
}

/*
 * Select the radeon_ps best matching @dpm_state from the dpm power
 * state table.  User states match on the ATOM UI classification
 * (respecting SINGLE_DISPLAY_ONLY vs. the new active CRTC count);
 * internal states match on their class/class2 flags.  When nothing
 * matches, falls back through progressively weaker states (thermal ->
 * ACPI -> battery -> performance, UVD variants -> uvd_ps) by re-running
 * the search, and may ultimately return NULL.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;

restart_search:
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (rdev->pm.dpm.new_active_crtc_count < 2)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (rdev->pm.dpm.new_active_crtc_count < 2)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (rdev->pm.dpm.new_active_crtc_count < 2)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			return rdev->pm.dpm.uvd_ps;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		return rdev->pm.dpm.uvd_ps;
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

/* NOTE(review): definition continues beyond this chunk; body incomplete here. */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if (!rdev->pm.dpm.thermal_active)
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed */
	if (rdev->pm.dpm.current_ps ==
rdev->pm.dpm.requested_ps) { 712*da321c8aSAlex Deucher /* update display watermarks based on new power state */ 713*da321c8aSAlex Deucher if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) { 714*da321c8aSAlex Deucher radeon_bandwidth_update(rdev); 715*da321c8aSAlex Deucher /* update displays */ 716*da321c8aSAlex Deucher radeon_dpm_display_configuration_changed(rdev); 717*da321c8aSAlex Deucher rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 718*da321c8aSAlex Deucher rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 719*da321c8aSAlex Deucher } 720*da321c8aSAlex Deucher return; 721*da321c8aSAlex Deucher } 722*da321c8aSAlex Deucher 723*da321c8aSAlex Deucher printk("switching from power state:\n"); 724*da321c8aSAlex Deucher radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 725*da321c8aSAlex Deucher printk("switching to power state:\n"); 726*da321c8aSAlex Deucher radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 727*da321c8aSAlex Deucher 728*da321c8aSAlex Deucher mutex_lock(&rdev->ddev->struct_mutex); 729*da321c8aSAlex Deucher down_write(&rdev->pm.mclk_lock); 730*da321c8aSAlex Deucher mutex_lock(&rdev->ring_lock); 731*da321c8aSAlex Deucher 732*da321c8aSAlex Deucher /* update display watermarks based on new power state */ 733*da321c8aSAlex Deucher radeon_bandwidth_update(rdev); 734*da321c8aSAlex Deucher /* update displays */ 735*da321c8aSAlex Deucher radeon_dpm_display_configuration_changed(rdev); 736*da321c8aSAlex Deucher 737*da321c8aSAlex Deucher rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 738*da321c8aSAlex Deucher rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 739*da321c8aSAlex Deucher 740*da321c8aSAlex Deucher /* wait for the rings to drain */ 741*da321c8aSAlex Deucher for (i = 0; i < RADEON_NUM_RINGS; i++) { 742*da321c8aSAlex Deucher struct radeon_ring *ring = &rdev->ring[i]; 743*da321c8aSAlex Deucher if (ring->ready) 
744*da321c8aSAlex Deucher radeon_fence_wait_empty_locked(rdev, i); 745*da321c8aSAlex Deucher } 746*da321c8aSAlex Deucher 747*da321c8aSAlex Deucher /* program the new power state */ 748*da321c8aSAlex Deucher radeon_dpm_set_power_state(rdev); 749*da321c8aSAlex Deucher 750*da321c8aSAlex Deucher /* update current power state */ 751*da321c8aSAlex Deucher rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps; 752*da321c8aSAlex Deucher 753*da321c8aSAlex Deucher mutex_unlock(&rdev->ring_lock); 754*da321c8aSAlex Deucher up_write(&rdev->pm.mclk_lock); 755*da321c8aSAlex Deucher mutex_unlock(&rdev->ddev->struct_mutex); 756*da321c8aSAlex Deucher } 757*da321c8aSAlex Deucher 758*da321c8aSAlex Deucher void radeon_dpm_enable_power_state(struct radeon_device *rdev, 759*da321c8aSAlex Deucher enum radeon_pm_state_type dpm_state) 760*da321c8aSAlex Deucher { 761*da321c8aSAlex Deucher if (!rdev->pm.dpm_enabled) 762*da321c8aSAlex Deucher return; 763*da321c8aSAlex Deucher 764*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 765*da321c8aSAlex Deucher switch (dpm_state) { 766*da321c8aSAlex Deucher case POWER_STATE_TYPE_INTERNAL_THERMAL: 767*da321c8aSAlex Deucher rdev->pm.dpm.thermal_active = true; 768*da321c8aSAlex Deucher break; 769*da321c8aSAlex Deucher default: 770*da321c8aSAlex Deucher rdev->pm.dpm.thermal_active = false; 771*da321c8aSAlex Deucher break; 772*da321c8aSAlex Deucher } 773*da321c8aSAlex Deucher rdev->pm.dpm.state = dpm_state; 774*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 775*da321c8aSAlex Deucher radeon_pm_compute_clocks(rdev); 776*da321c8aSAlex Deucher } 777*da321c8aSAlex Deucher 778*da321c8aSAlex Deucher static void radeon_pm_suspend_old(struct radeon_device *rdev) 779ce8f5370SAlex Deucher { 780ce8f5370SAlex Deucher mutex_lock(&rdev->pm.mutex); 7813f53eb6fSRafael J. Wysocki if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 7823f53eb6fSRafael J. Wysocki if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 7833f53eb6fSRafael J. 
Wysocki rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 7843f53eb6fSRafael J. Wysocki } 785ce8f5370SAlex Deucher mutex_unlock(&rdev->pm.mutex); 78632c87fcaSTejun Heo 78732c87fcaSTejun Heo cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 788ce8f5370SAlex Deucher } 789ce8f5370SAlex Deucher 790*da321c8aSAlex Deucher static void radeon_pm_suspend_dpm(struct radeon_device *rdev) 791*da321c8aSAlex Deucher { 792*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 793*da321c8aSAlex Deucher /* disable dpm */ 794*da321c8aSAlex Deucher radeon_dpm_disable(rdev); 795*da321c8aSAlex Deucher /* reset the power state */ 796*da321c8aSAlex Deucher rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 797*da321c8aSAlex Deucher rdev->pm.dpm_enabled = false; 798*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 799*da321c8aSAlex Deucher } 800*da321c8aSAlex Deucher 801*da321c8aSAlex Deucher void radeon_pm_suspend(struct radeon_device *rdev) 802*da321c8aSAlex Deucher { 803*da321c8aSAlex Deucher if (rdev->pm.pm_method == PM_METHOD_DPM) 804*da321c8aSAlex Deucher radeon_pm_suspend_dpm(rdev); 805*da321c8aSAlex Deucher else 806*da321c8aSAlex Deucher radeon_pm_suspend_old(rdev); 807*da321c8aSAlex Deucher } 808*da321c8aSAlex Deucher 809*da321c8aSAlex Deucher static void radeon_pm_resume_old(struct radeon_device *rdev) 810ce8f5370SAlex Deucher { 811ed18a360SAlex Deucher /* set up the default clocks if the MC ucode is loaded */ 8122e3b3b10SAlex Deucher if ((rdev->family >= CHIP_BARTS) && 8132e3b3b10SAlex Deucher (rdev->family <= CHIP_CAYMAN) && 8142e3b3b10SAlex Deucher rdev->mc_fw) { 815ed18a360SAlex Deucher if (rdev->pm.default_vddc) 8168a83ec5eSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 8178a83ec5eSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDC); 8182feea49aSAlex Deucher if (rdev->pm.default_vddci) 8192feea49aSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 8202feea49aSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDCI); 821ed18a360SAlex Deucher 
if (rdev->pm.default_sclk) 822ed18a360SAlex Deucher radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 823ed18a360SAlex Deucher if (rdev->pm.default_mclk) 824ed18a360SAlex Deucher radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 825ed18a360SAlex Deucher } 826f8ed8b4cSAlex Deucher /* asic init will reset the default power state */ 827f8ed8b4cSAlex Deucher mutex_lock(&rdev->pm.mutex); 828f8ed8b4cSAlex Deucher rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 829f8ed8b4cSAlex Deucher rdev->pm.current_clock_mode_index = 0; 8309ace9f7bSAlex Deucher rdev->pm.current_sclk = rdev->pm.default_sclk; 8319ace9f7bSAlex Deucher rdev->pm.current_mclk = rdev->pm.default_mclk; 8324d60173fSAlex Deucher rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 8332feea49aSAlex Deucher rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; 8343f53eb6fSRafael J. Wysocki if (rdev->pm.pm_method == PM_METHOD_DYNPM 8353f53eb6fSRafael J. Wysocki && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 8363f53eb6fSRafael J. Wysocki rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 83732c87fcaSTejun Heo schedule_delayed_work(&rdev->pm.dynpm_idle_work, 8383f53eb6fSRafael J. Wysocki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 8393f53eb6fSRafael J. 
Wysocki } 840f8ed8b4cSAlex Deucher mutex_unlock(&rdev->pm.mutex); 841ce8f5370SAlex Deucher radeon_pm_compute_clocks(rdev); 842d0d6cb81SRafał Miłecki } 843d0d6cb81SRafał Miłecki 844*da321c8aSAlex Deucher static void radeon_pm_resume_dpm(struct radeon_device *rdev) 8457433874eSRafał Miłecki { 84626481fb1SDave Airlie int ret; 8470d18abedSDan Carpenter 848*da321c8aSAlex Deucher /* asic init will reset to the boot state */ 849*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 850*da321c8aSAlex Deucher rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 851*da321c8aSAlex Deucher radeon_dpm_setup_asic(rdev); 852*da321c8aSAlex Deucher ret = radeon_dpm_enable(rdev); 853*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 854*da321c8aSAlex Deucher if (ret) { 855*da321c8aSAlex Deucher DRM_ERROR("radeon: dpm resume failed\n"); 856*da321c8aSAlex Deucher if ((rdev->family >= CHIP_BARTS) && 857*da321c8aSAlex Deucher (rdev->family <= CHIP_CAYMAN) && 858*da321c8aSAlex Deucher rdev->mc_fw) { 859*da321c8aSAlex Deucher if (rdev->pm.default_vddc) 860*da321c8aSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 861*da321c8aSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDC); 862*da321c8aSAlex Deucher if (rdev->pm.default_vddci) 863*da321c8aSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 864*da321c8aSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDCI); 865*da321c8aSAlex Deucher if (rdev->pm.default_sclk) 866*da321c8aSAlex Deucher radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 867*da321c8aSAlex Deucher if (rdev->pm.default_mclk) 868*da321c8aSAlex Deucher radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 869*da321c8aSAlex Deucher } 870*da321c8aSAlex Deucher } else { 871*da321c8aSAlex Deucher rdev->pm.dpm_enabled = true; 872*da321c8aSAlex Deucher radeon_pm_compute_clocks(rdev); 873*da321c8aSAlex Deucher } 874*da321c8aSAlex Deucher } 875*da321c8aSAlex Deucher 876*da321c8aSAlex Deucher void radeon_pm_resume(struct radeon_device *rdev) 
877*da321c8aSAlex Deucher { 878*da321c8aSAlex Deucher if (rdev->pm.pm_method == PM_METHOD_DPM) 879*da321c8aSAlex Deucher radeon_pm_resume_dpm(rdev); 880*da321c8aSAlex Deucher else 881*da321c8aSAlex Deucher radeon_pm_resume_old(rdev); 882*da321c8aSAlex Deucher } 883*da321c8aSAlex Deucher 884*da321c8aSAlex Deucher static int radeon_pm_init_old(struct radeon_device *rdev) 885*da321c8aSAlex Deucher { 886*da321c8aSAlex Deucher int ret; 887*da321c8aSAlex Deucher 888f8ed8b4cSAlex Deucher rdev->pm.profile = PM_PROFILE_DEFAULT; 889ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 890ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 891ce8f5370SAlex Deucher rdev->pm.dynpm_can_upclock = true; 892ce8f5370SAlex Deucher rdev->pm.dynpm_can_downclock = true; 8939ace9f7bSAlex Deucher rdev->pm.default_sclk = rdev->clock.default_sclk; 8949ace9f7bSAlex Deucher rdev->pm.default_mclk = rdev->clock.default_mclk; 895f8ed8b4cSAlex Deucher rdev->pm.current_sclk = rdev->clock.default_sclk; 896f8ed8b4cSAlex Deucher rdev->pm.current_mclk = rdev->clock.default_mclk; 89721a8122aSAlex Deucher rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 898c913e23aSRafał Miłecki 89956278a8eSAlex Deucher if (rdev->bios) { 90056278a8eSAlex Deucher if (rdev->is_atom_bios) 90156278a8eSAlex Deucher radeon_atombios_get_power_modes(rdev); 90256278a8eSAlex Deucher else 90356278a8eSAlex Deucher radeon_combios_get_power_modes(rdev); 904f712d0c7SRafał Miłecki radeon_pm_print_states(rdev); 905ce8f5370SAlex Deucher radeon_pm_init_profile(rdev); 906ed18a360SAlex Deucher /* set up the default clocks if the MC ucode is loaded */ 9072e3b3b10SAlex Deucher if ((rdev->family >= CHIP_BARTS) && 9082e3b3b10SAlex Deucher (rdev->family <= CHIP_CAYMAN) && 9092e3b3b10SAlex Deucher rdev->mc_fw) { 910ed18a360SAlex Deucher if (rdev->pm.default_vddc) 9118a83ec5eSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 9128a83ec5eSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDC); 9134639dd21SAlex 
Deucher if (rdev->pm.default_vddci) 9144639dd21SAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 9154639dd21SAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDCI); 916ed18a360SAlex Deucher if (rdev->pm.default_sclk) 917ed18a360SAlex Deucher radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 918ed18a360SAlex Deucher if (rdev->pm.default_mclk) 919ed18a360SAlex Deucher radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 920ed18a360SAlex Deucher } 92156278a8eSAlex Deucher } 92256278a8eSAlex Deucher 92321a8122aSAlex Deucher /* set up the internal thermal sensor if applicable */ 9240d18abedSDan Carpenter ret = radeon_hwmon_init(rdev); 9250d18abedSDan Carpenter if (ret) 9260d18abedSDan Carpenter return ret; 92732c87fcaSTejun Heo 92832c87fcaSTejun Heo INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); 92932c87fcaSTejun Heo 930ce8f5370SAlex Deucher if (rdev->pm.num_power_states > 1) { 931ce8f5370SAlex Deucher /* where's the best place to put these? */ 93226481fb1SDave Airlie ret = device_create_file(rdev->dev, &dev_attr_power_profile); 93326481fb1SDave Airlie if (ret) 93426481fb1SDave Airlie DRM_ERROR("failed to create device file for power profile\n"); 93526481fb1SDave Airlie ret = device_create_file(rdev->dev, &dev_attr_power_method); 93626481fb1SDave Airlie if (ret) 93726481fb1SDave Airlie DRM_ERROR("failed to create device file for power method\n"); 938ce8f5370SAlex Deucher 9397433874eSRafał Miłecki if (radeon_debugfs_pm_init(rdev)) { 940c142c3e5SRafał Miłecki DRM_ERROR("Failed to register debugfs file for PM!\n"); 9417433874eSRafał Miłecki } 9427433874eSRafał Miłecki 943c913e23aSRafał Miłecki DRM_INFO("radeon: power management initialized\n"); 944ce8f5370SAlex Deucher } 945c913e23aSRafał Miłecki 9467433874eSRafał Miłecki return 0; 9477433874eSRafał Miłecki } 9487433874eSRafał Miłecki 949*da321c8aSAlex Deucher static void radeon_dpm_print_power_states(struct radeon_device *rdev) 950*da321c8aSAlex Deucher { 951*da321c8aSAlex Deucher 
int i; 952*da321c8aSAlex Deucher 953*da321c8aSAlex Deucher for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 954*da321c8aSAlex Deucher printk("== power state %d ==\n", i); 955*da321c8aSAlex Deucher radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]); 956*da321c8aSAlex Deucher } 957*da321c8aSAlex Deucher } 958*da321c8aSAlex Deucher 959*da321c8aSAlex Deucher static int radeon_pm_init_dpm(struct radeon_device *rdev) 960*da321c8aSAlex Deucher { 961*da321c8aSAlex Deucher int ret; 962*da321c8aSAlex Deucher 963*da321c8aSAlex Deucher /* default to performance state */ 964*da321c8aSAlex Deucher rdev->pm.dpm.state = POWER_STATE_TYPE_PERFORMANCE; 965*da321c8aSAlex Deucher rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 966*da321c8aSAlex Deucher rdev->pm.default_sclk = rdev->clock.default_sclk; 967*da321c8aSAlex Deucher rdev->pm.default_mclk = rdev->clock.default_mclk; 968*da321c8aSAlex Deucher rdev->pm.current_sclk = rdev->clock.default_sclk; 969*da321c8aSAlex Deucher rdev->pm.current_mclk = rdev->clock.default_mclk; 970*da321c8aSAlex Deucher rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 971*da321c8aSAlex Deucher 972*da321c8aSAlex Deucher if (rdev->bios && rdev->is_atom_bios) 973*da321c8aSAlex Deucher radeon_atombios_get_power_modes(rdev); 974*da321c8aSAlex Deucher else 975*da321c8aSAlex Deucher return -EINVAL; 976*da321c8aSAlex Deucher 977*da321c8aSAlex Deucher /* set up the internal thermal sensor if applicable */ 978*da321c8aSAlex Deucher ret = radeon_hwmon_init(rdev); 979*da321c8aSAlex Deucher if (ret) 980*da321c8aSAlex Deucher return ret; 981*da321c8aSAlex Deucher 982*da321c8aSAlex Deucher INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler); 983*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 984*da321c8aSAlex Deucher radeon_dpm_init(rdev); 985*da321c8aSAlex Deucher rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 986*da321c8aSAlex Deucher radeon_dpm_print_power_states(rdev); 987*da321c8aSAlex Deucher 
radeon_dpm_setup_asic(rdev); 988*da321c8aSAlex Deucher ret = radeon_dpm_enable(rdev); 989*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 990*da321c8aSAlex Deucher if (ret) { 991*da321c8aSAlex Deucher rdev->pm.dpm_enabled = false; 992*da321c8aSAlex Deucher if ((rdev->family >= CHIP_BARTS) && 993*da321c8aSAlex Deucher (rdev->family <= CHIP_CAYMAN) && 994*da321c8aSAlex Deucher rdev->mc_fw) { 995*da321c8aSAlex Deucher if (rdev->pm.default_vddc) 996*da321c8aSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 997*da321c8aSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDC); 998*da321c8aSAlex Deucher if (rdev->pm.default_vddci) 999*da321c8aSAlex Deucher radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1000*da321c8aSAlex Deucher SET_VOLTAGE_TYPE_ASIC_VDDCI); 1001*da321c8aSAlex Deucher if (rdev->pm.default_sclk) 1002*da321c8aSAlex Deucher radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1003*da321c8aSAlex Deucher if (rdev->pm.default_mclk) 1004*da321c8aSAlex Deucher radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1005*da321c8aSAlex Deucher } 1006*da321c8aSAlex Deucher DRM_ERROR("radeon: dpm initialization failed\n"); 1007*da321c8aSAlex Deucher return ret; 1008*da321c8aSAlex Deucher } 1009*da321c8aSAlex Deucher rdev->pm.dpm_enabled = true; 1010*da321c8aSAlex Deucher radeon_pm_compute_clocks(rdev); 1011*da321c8aSAlex Deucher 1012*da321c8aSAlex Deucher if (rdev->pm.num_power_states > 1) { 1013*da321c8aSAlex Deucher ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1014*da321c8aSAlex Deucher if (ret) 1015*da321c8aSAlex Deucher DRM_ERROR("failed to create device file for dpm state\n"); 1016*da321c8aSAlex Deucher /* XXX: these are noops for dpm but are here for backwards compat */ 1017*da321c8aSAlex Deucher ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1018*da321c8aSAlex Deucher if (ret) 1019*da321c8aSAlex Deucher DRM_ERROR("failed to create device file for power profile\n"); 1020*da321c8aSAlex Deucher ret = 
device_create_file(rdev->dev, &dev_attr_power_method); 1021*da321c8aSAlex Deucher if (ret) 1022*da321c8aSAlex Deucher DRM_ERROR("failed to create device file for power method\n"); 1023*da321c8aSAlex Deucher DRM_INFO("radeon: dpm initialized\n"); 1024*da321c8aSAlex Deucher } 1025*da321c8aSAlex Deucher 1026*da321c8aSAlex Deucher return 0; 1027*da321c8aSAlex Deucher } 1028*da321c8aSAlex Deucher 1029*da321c8aSAlex Deucher int radeon_pm_init(struct radeon_device *rdev) 1030*da321c8aSAlex Deucher { 1031*da321c8aSAlex Deucher /* enable dpm on rv6xx+ */ 1032*da321c8aSAlex Deucher switch (rdev->family) { 1033*da321c8aSAlex Deucher default: 1034*da321c8aSAlex Deucher /* default to profile method */ 1035*da321c8aSAlex Deucher rdev->pm.pm_method = PM_METHOD_PROFILE; 1036*da321c8aSAlex Deucher break; 1037*da321c8aSAlex Deucher } 1038*da321c8aSAlex Deucher 1039*da321c8aSAlex Deucher if (rdev->pm.pm_method == PM_METHOD_DPM) 1040*da321c8aSAlex Deucher return radeon_pm_init_dpm(rdev); 1041*da321c8aSAlex Deucher else 1042*da321c8aSAlex Deucher return radeon_pm_init_old(rdev); 1043*da321c8aSAlex Deucher } 1044*da321c8aSAlex Deucher 1045*da321c8aSAlex Deucher static void radeon_pm_fini_old(struct radeon_device *rdev) 104629fb52caSAlex Deucher { 1047ce8f5370SAlex Deucher if (rdev->pm.num_power_states > 1) { 1048a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 1049ce8f5370SAlex Deucher if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1050ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_DEFAULT; 1051ce8f5370SAlex Deucher radeon_pm_update_profile(rdev); 1052ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 1053ce8f5370SAlex Deucher } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1054ce8f5370SAlex Deucher /* reset default clocks */ 1055ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1056ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1057ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 105858e21dffSAlex Deucher } 1059ce8f5370SAlex 
Deucher mutex_unlock(&rdev->pm.mutex); 106032c87fcaSTejun Heo 106132c87fcaSTejun Heo cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 106258e21dffSAlex Deucher 1063ce8f5370SAlex Deucher device_remove_file(rdev->dev, &dev_attr_power_profile); 1064ce8f5370SAlex Deucher device_remove_file(rdev->dev, &dev_attr_power_method); 1065ce8f5370SAlex Deucher } 1066a424816fSAlex Deucher 10670975b162SAlex Deucher if (rdev->pm.power_state) 10680975b162SAlex Deucher kfree(rdev->pm.power_state); 10690975b162SAlex Deucher 107021a8122aSAlex Deucher radeon_hwmon_fini(rdev); 107129fb52caSAlex Deucher } 107229fb52caSAlex Deucher 1073*da321c8aSAlex Deucher static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1074*da321c8aSAlex Deucher { 1075*da321c8aSAlex Deucher if (rdev->pm.num_power_states > 1) { 1076*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 1077*da321c8aSAlex Deucher radeon_dpm_disable(rdev); 1078*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 1079*da321c8aSAlex Deucher 1080*da321c8aSAlex Deucher device_remove_file(rdev->dev, &dev_attr_power_dpm_state); 1081*da321c8aSAlex Deucher /* XXX backwards compat */ 1082*da321c8aSAlex Deucher device_remove_file(rdev->dev, &dev_attr_power_profile); 1083*da321c8aSAlex Deucher device_remove_file(rdev->dev, &dev_attr_power_method); 1084*da321c8aSAlex Deucher } 1085*da321c8aSAlex Deucher radeon_dpm_fini(rdev); 1086*da321c8aSAlex Deucher 1087*da321c8aSAlex Deucher if (rdev->pm.power_state) 1088*da321c8aSAlex Deucher kfree(rdev->pm.power_state); 1089*da321c8aSAlex Deucher 1090*da321c8aSAlex Deucher radeon_hwmon_fini(rdev); 1091*da321c8aSAlex Deucher } 1092*da321c8aSAlex Deucher 1093*da321c8aSAlex Deucher void radeon_pm_fini(struct radeon_device *rdev) 1094*da321c8aSAlex Deucher { 1095*da321c8aSAlex Deucher if (rdev->pm.pm_method == PM_METHOD_DPM) 1096*da321c8aSAlex Deucher radeon_pm_fini_dpm(rdev); 1097*da321c8aSAlex Deucher else 1098*da321c8aSAlex Deucher radeon_pm_fini_old(rdev); 1099*da321c8aSAlex Deucher } 
1100*da321c8aSAlex Deucher 1101*da321c8aSAlex Deucher static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) 1102c913e23aSRafał Miłecki { 1103c913e23aSRafał Miłecki struct drm_device *ddev = rdev->ddev; 1104a48b9b4eSAlex Deucher struct drm_crtc *crtc; 1105c913e23aSRafał Miłecki struct radeon_crtc *radeon_crtc; 1106c913e23aSRafał Miłecki 1107ce8f5370SAlex Deucher if (rdev->pm.num_power_states < 2) 1108ce8f5370SAlex Deucher return; 1109ce8f5370SAlex Deucher 1110c913e23aSRafał Miłecki mutex_lock(&rdev->pm.mutex); 1111c913e23aSRafał Miłecki 1112c913e23aSRafał Miłecki rdev->pm.active_crtcs = 0; 1113a48b9b4eSAlex Deucher rdev->pm.active_crtc_count = 0; 1114a48b9b4eSAlex Deucher list_for_each_entry(crtc, 1115a48b9b4eSAlex Deucher &ddev->mode_config.crtc_list, head) { 1116a48b9b4eSAlex Deucher radeon_crtc = to_radeon_crtc(crtc); 1117a48b9b4eSAlex Deucher if (radeon_crtc->enabled) { 1118c913e23aSRafał Miłecki rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 1119a48b9b4eSAlex Deucher rdev->pm.active_crtc_count++; 1120c913e23aSRafał Miłecki } 1121c913e23aSRafał Miłecki } 1122c913e23aSRafał Miłecki 1123ce8f5370SAlex Deucher if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1124ce8f5370SAlex Deucher radeon_pm_update_profile(rdev); 1125ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 1126ce8f5370SAlex Deucher } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1127ce8f5370SAlex Deucher if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { 1128a48b9b4eSAlex Deucher if (rdev->pm.active_crtc_count > 1) { 1129ce8f5370SAlex Deucher if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1130ce8f5370SAlex Deucher cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1131c913e23aSRafał Miłecki 1132ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 1133ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1134ce8f5370SAlex Deucher radeon_pm_get_dynpm_state(rdev); 1135ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 1136c913e23aSRafał Miłecki 
1137d9fdaafbSDave Airlie DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); 1138c913e23aSRafał Miłecki } 1139a48b9b4eSAlex Deucher } else if (rdev->pm.active_crtc_count == 1) { 1140c913e23aSRafał Miłecki /* TODO: Increase clocks if needed for current mode */ 1141c913e23aSRafał Miłecki 1142ce8f5370SAlex Deucher if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { 1143ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1144ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; 1145ce8f5370SAlex Deucher radeon_pm_get_dynpm_state(rdev); 1146ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 1147c913e23aSRafał Miłecki 114832c87fcaSTejun Heo schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1149c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1150ce8f5370SAlex Deucher } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { 1151ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 115232c87fcaSTejun Heo schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1153c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1154d9fdaafbSDave Airlie DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); 1155c913e23aSRafał Miłecki } 1156a48b9b4eSAlex Deucher } else { /* count == 0 */ 1157ce8f5370SAlex Deucher if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { 1158ce8f5370SAlex Deucher cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1159c913e23aSRafał Miłecki 1160ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; 1161ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; 1162ce8f5370SAlex Deucher radeon_pm_get_dynpm_state(rdev); 1163ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 1164ce8f5370SAlex Deucher } 1165ce8f5370SAlex Deucher } 116673a6d3fcSRafał Miłecki } 1167c913e23aSRafał Miłecki } 1168c913e23aSRafał Miłecki 1169c913e23aSRafał Miłecki mutex_unlock(&rdev->pm.mutex); 1170c913e23aSRafał Miłecki } 1171c913e23aSRafał Miłecki 1172*da321c8aSAlex Deucher 
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) 1173*da321c8aSAlex Deucher { 1174*da321c8aSAlex Deucher struct drm_device *ddev = rdev->ddev; 1175*da321c8aSAlex Deucher struct drm_crtc *crtc; 1176*da321c8aSAlex Deucher struct radeon_crtc *radeon_crtc; 1177*da321c8aSAlex Deucher 1178*da321c8aSAlex Deucher mutex_lock(&rdev->pm.mutex); 1179*da321c8aSAlex Deucher 1180*da321c8aSAlex Deucher rdev->pm.dpm.new_active_crtcs = 0; 1181*da321c8aSAlex Deucher rdev->pm.dpm.new_active_crtc_count = 0; 1182*da321c8aSAlex Deucher list_for_each_entry(crtc, 1183*da321c8aSAlex Deucher &ddev->mode_config.crtc_list, head) { 1184*da321c8aSAlex Deucher radeon_crtc = to_radeon_crtc(crtc); 1185*da321c8aSAlex Deucher if (crtc->enabled) { 1186*da321c8aSAlex Deucher rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); 1187*da321c8aSAlex Deucher rdev->pm.dpm.new_active_crtc_count++; 1188*da321c8aSAlex Deucher } 1189*da321c8aSAlex Deucher } 1190*da321c8aSAlex Deucher 1191*da321c8aSAlex Deucher radeon_dpm_change_power_state_locked(rdev); 1192*da321c8aSAlex Deucher 1193*da321c8aSAlex Deucher mutex_unlock(&rdev->pm.mutex); 1194*da321c8aSAlex Deucher } 1195*da321c8aSAlex Deucher 1196*da321c8aSAlex Deucher void radeon_pm_compute_clocks(struct radeon_device *rdev) 1197*da321c8aSAlex Deucher { 1198*da321c8aSAlex Deucher if (rdev->pm.pm_method == PM_METHOD_DPM) 1199*da321c8aSAlex Deucher radeon_pm_compute_clocks_dpm(rdev); 1200*da321c8aSAlex Deucher else 1201*da321c8aSAlex Deucher radeon_pm_compute_clocks_old(rdev); 1202*da321c8aSAlex Deucher } 1203*da321c8aSAlex Deucher 1204ce8f5370SAlex Deucher static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1205f735261bSDave Airlie { 120675fa0b08SMario Kleiner int crtc, vpos, hpos, vbl_status; 1207f735261bSDave Airlie bool in_vbl = true; 1208f735261bSDave Airlie 120975fa0b08SMario Kleiner /* Iterate over all active crtc's. All crtc's must be in vblank, 121075fa0b08SMario Kleiner * otherwise return in_vbl == false. 
121175fa0b08SMario Kleiner */ 121275fa0b08SMario Kleiner for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 121375fa0b08SMario Kleiner if (rdev->pm.active_crtcs & (1 << crtc)) { 1214f5a80209SMario Kleiner vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); 1215f5a80209SMario Kleiner if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1216f5a80209SMario Kleiner !(vbl_status & DRM_SCANOUTPOS_INVBL)) 1217f735261bSDave Airlie in_vbl = false; 1218f735261bSDave Airlie } 1219f735261bSDave Airlie } 1220f81f2024SMatthew Garrett 1221f81f2024SMatthew Garrett return in_vbl; 1222f81f2024SMatthew Garrett } 1223f81f2024SMatthew Garrett 1224ce8f5370SAlex Deucher static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 1225f81f2024SMatthew Garrett { 1226f81f2024SMatthew Garrett u32 stat_crtc = 0; 1227f81f2024SMatthew Garrett bool in_vbl = radeon_pm_in_vbl(rdev); 1228f81f2024SMatthew Garrett 1229f735261bSDave Airlie if (in_vbl == false) 1230d9fdaafbSDave Airlie DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, 1231bae6b562SAlex Deucher finish ? 
"exit" : "entry"); 1232f735261bSDave Airlie return in_vbl; 1233f735261bSDave Airlie } 1234c913e23aSRafał Miłecki 1235ce8f5370SAlex Deucher static void radeon_dynpm_idle_work_handler(struct work_struct *work) 1236c913e23aSRafał Miłecki { 1237c913e23aSRafał Miłecki struct radeon_device *rdev; 1238d9932a32SMatthew Garrett int resched; 1239c913e23aSRafał Miłecki rdev = container_of(work, struct radeon_device, 1240ce8f5370SAlex Deucher pm.dynpm_idle_work.work); 1241c913e23aSRafał Miłecki 1242d9932a32SMatthew Garrett resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1243c913e23aSRafał Miłecki mutex_lock(&rdev->pm.mutex); 1244ce8f5370SAlex Deucher if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1245c913e23aSRafał Miłecki int not_processed = 0; 12467465280cSAlex Deucher int i; 1247c913e23aSRafał Miłecki 12487465280cSAlex Deucher for (i = 0; i < RADEON_NUM_RINGS; ++i) { 12490ec0612aSAlex Deucher struct radeon_ring *ring = &rdev->ring[i]; 12500ec0612aSAlex Deucher 12510ec0612aSAlex Deucher if (ring->ready) { 125247492a23SChristian König not_processed += radeon_fence_count_emitted(rdev, i); 12537465280cSAlex Deucher if (not_processed >= 3) 12547465280cSAlex Deucher break; 12557465280cSAlex Deucher } 12560ec0612aSAlex Deucher } 1257c913e23aSRafał Miłecki 1258c913e23aSRafał Miłecki if (not_processed >= 3) { /* should upclock */ 1259ce8f5370SAlex Deucher if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 1260ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1261ce8f5370SAlex Deucher } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1262ce8f5370SAlex Deucher rdev->pm.dynpm_can_upclock) { 1263ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = 1264ce8f5370SAlex Deucher DYNPM_ACTION_UPCLOCK; 1265ce8f5370SAlex Deucher rdev->pm.dynpm_action_timeout = jiffies + 1266c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1267c913e23aSRafał Miłecki } 1268c913e23aSRafał Miłecki } else if (not_processed == 0) { /* 
should downclock */ 1269ce8f5370SAlex Deucher if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 1270ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1271ce8f5370SAlex Deucher } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1272ce8f5370SAlex Deucher rdev->pm.dynpm_can_downclock) { 1273ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = 1274ce8f5370SAlex Deucher DYNPM_ACTION_DOWNCLOCK; 1275ce8f5370SAlex Deucher rdev->pm.dynpm_action_timeout = jiffies + 1276c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1277c913e23aSRafał Miłecki } 1278c913e23aSRafał Miłecki } 1279c913e23aSRafał Miłecki 1280d7311171SAlex Deucher /* Note, radeon_pm_set_clocks is called with static_switch set 1281d7311171SAlex Deucher * to false since we want to wait for vbl to avoid flicker. 1282d7311171SAlex Deucher */ 1283ce8f5370SAlex Deucher if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 1284ce8f5370SAlex Deucher jiffies > rdev->pm.dynpm_action_timeout) { 1285ce8f5370SAlex Deucher radeon_pm_get_dynpm_state(rdev); 1286ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 1287c913e23aSRafał Miłecki } 1288c913e23aSRafał Miłecki 128932c87fcaSTejun Heo schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1290c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1291c913e23aSRafał Miłecki } 12923f53eb6fSRafael J. Wysocki mutex_unlock(&rdev->pm.mutex); 12933f53eb6fSRafael J. Wysocki ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 12943f53eb6fSRafael J. 
Wysocki } 1295c913e23aSRafał Miłecki 12967433874eSRafał Miłecki /* 12977433874eSRafał Miłecki * Debugfs info 12987433874eSRafał Miłecki */ 12997433874eSRafał Miłecki #if defined(CONFIG_DEBUG_FS) 13007433874eSRafał Miłecki 13017433874eSRafał Miłecki static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 13027433874eSRafał Miłecki { 13037433874eSRafał Miłecki struct drm_info_node *node = (struct drm_info_node *) m->private; 13047433874eSRafał Miłecki struct drm_device *dev = node->minor->dev; 13057433874eSRafał Miłecki struct radeon_device *rdev = dev->dev_private; 13067433874eSRafał Miłecki 13079ace9f7bSAlex Deucher seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1308bf05d998SAlex Deucher /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ 1309bf05d998SAlex Deucher if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1310bf05d998SAlex Deucher seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1311bf05d998SAlex Deucher else 13126234077dSRafał Miłecki seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 13139ace9f7bSAlex Deucher seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1314798bcf73SAlex Deucher if (rdev->asic->pm.get_memory_clock) 13156234077dSRafał Miłecki seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 13160fcbe947SRafał Miłecki if (rdev->pm.current_vddc) 13170fcbe947SRafał Miłecki seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 1318798bcf73SAlex Deucher if (rdev->asic->pm.get_pcie_lanes) 1319aa5120d2SRafał Miłecki seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 13207433874eSRafał Miłecki 13217433874eSRafał Miłecki return 0; 13227433874eSRafał Miłecki } 13237433874eSRafał Miłecki 13247433874eSRafał Miłecki static struct drm_info_list radeon_pm_info_list[] = { 13257433874eSRafał Miłecki {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 
13267433874eSRafał Miłecki }; 13277433874eSRafał Miłecki #endif 13287433874eSRafał Miłecki 1329c913e23aSRafał Miłecki static int radeon_debugfs_pm_init(struct radeon_device *rdev) 13307433874eSRafał Miłecki { 13317433874eSRafał Miłecki #if defined(CONFIG_DEBUG_FS) 13327433874eSRafał Miłecki return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 13337433874eSRafał Miłecki #else 13347433874eSRafał Miłecki return 0; 13357433874eSRafał Miłecki #endif 13367433874eSRafał Miłecki } 1337