/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

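/* Look up the index into rdev->pm.power_state for the Nth (instance) power
 * state of the requested type, falling back to the default power state
 * index when no match is found.
 */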
int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}

void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (rdev->pm.profile == PM_PROFILE_AUTO) {
			mutex_lock(&rdev->pm.mutex);
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}
	}
}

static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}

static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->pm.default_sclk)
			sclk = rdev->pm.default_sclk;

		/* starting with BTC, there is one state that is used for both
		 * MH and SH.  Difference is that we always use the high clock index for
		 * mclk.
		 */
		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
		    (rdev->family >= CHIP_BARTS) &&
		    rdev->pm.active_crtc_count &&
		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
		else
			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
				clock_info[rdev->pm.requested_clock_mode_index].mclk;

		if (mclk > rdev->pm.default_mclk)
			mclk = rdev->pm.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc.*/
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

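/* Apply the requested power state: drain the rings, unmap VRAM BOs and hold
 * vblank references while radeon_set_power_state() reprograms the clocks,
 * then refresh the display watermarks for the new state.
 */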
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i, r;

	/* no need to take locks, etc. if nothing's going to change */
	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	mutex_lock(&rdev->ddev->struct_mutex);
	down_write(&rdev->pm.mclk_lock);
	mutex_lock(&rdev->ring_lock);

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (!ring->ready)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* needs a GPU reset, don't reset here */
			mutex_unlock(&rdev->ring_lock);
			up_write(&rdev->pm.mclk_lock);
			mutex_unlock(&rdev->ddev->struct_mutex);
			return;
		}
	}

	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->ring_lock);
	up_write(&rdev->pm.mclk_lock);
	mutex_unlock(&rdev->ddev->struct_mutex);
}

static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
						 j,
						 clock_info->sclk * 10);
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage);
		}
	}
}

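/* sysfs interface (see the DEVICE_ATTR definitions below): "power_profile"
 * selects default/auto/low/mid/high for the profile method, and
 * "power_method" switches between the "profile" and "dynpm" methods.
 */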
"high" : "default"); 345a424816fSAlex Deucher } 346a424816fSAlex Deucher 347ce8f5370SAlex Deucher static ssize_t radeon_set_pm_profile(struct device *dev, 348a424816fSAlex Deucher struct device_attribute *attr, 349a424816fSAlex Deucher const char *buf, 350a424816fSAlex Deucher size_t count) 351a424816fSAlex Deucher { 352a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 353a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 354a424816fSAlex Deucher 355a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 356ce8f5370SAlex Deucher if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 357ce8f5370SAlex Deucher if (strncmp("default", buf, strlen("default")) == 0) 358ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_DEFAULT; 359ce8f5370SAlex Deucher else if (strncmp("auto", buf, strlen("auto")) == 0) 360ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_AUTO; 361ce8f5370SAlex Deucher else if (strncmp("low", buf, strlen("low")) == 0) 362ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_LOW; 363c9e75b21SAlex Deucher else if (strncmp("mid", buf, strlen("mid")) == 0) 364c9e75b21SAlex Deucher rdev->pm.profile = PM_PROFILE_MID; 365ce8f5370SAlex Deucher else if (strncmp("high", buf, strlen("high")) == 0) 366ce8f5370SAlex Deucher rdev->pm.profile = PM_PROFILE_HIGH; 367ce8f5370SAlex Deucher else { 3681783e4bfSThomas Renninger count = -EINVAL; 369ce8f5370SAlex Deucher goto fail; 370ce8f5370SAlex Deucher } 371ce8f5370SAlex Deucher radeon_pm_update_profile(rdev); 372ce8f5370SAlex Deucher radeon_pm_set_clocks(rdev); 3731783e4bfSThomas Renninger } else 3741783e4bfSThomas Renninger count = -EINVAL; 3751783e4bfSThomas Renninger 376ce8f5370SAlex Deucher fail: 377a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 378a424816fSAlex Deucher 379a424816fSAlex Deucher return count; 380a424816fSAlex Deucher } 381a424816fSAlex Deucher 382ce8f5370SAlex Deucher static ssize_t radeon_get_pm_method(struct device *dev, 383ce8f5370SAlex Deucher struct device_attribute *attr, 384ce8f5370SAlex Deucher char *buf) 38556278a8eSAlex Deucher { 386ce8f5370SAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 387ce8f5370SAlex Deucher struct radeon_device *rdev = ddev->dev_private; 388ce8f5370SAlex Deucher int pm = rdev->pm.pm_method; 38956278a8eSAlex Deucher 390ce8f5370SAlex Deucher return snprintf(buf, PAGE_SIZE, "%s\n", 391ce8f5370SAlex Deucher (pm == PM_METHOD_DYNPM) ? 
"dynpm" : "profile"); 39256278a8eSAlex Deucher } 39356278a8eSAlex Deucher 394ce8f5370SAlex Deucher static ssize_t radeon_set_pm_method(struct device *dev, 395ce8f5370SAlex Deucher struct device_attribute *attr, 396ce8f5370SAlex Deucher const char *buf, 397ce8f5370SAlex Deucher size_t count) 398d0d6cb81SRafał Miłecki { 399ce8f5370SAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 400ce8f5370SAlex Deucher struct radeon_device *rdev = ddev->dev_private; 401ce8f5370SAlex Deucher 402ce8f5370SAlex Deucher 403ce8f5370SAlex Deucher if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 404ce8f5370SAlex Deucher mutex_lock(&rdev->pm.mutex); 405ce8f5370SAlex Deucher rdev->pm.pm_method = PM_METHOD_DYNPM; 406ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 407ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 408ce8f5370SAlex Deucher mutex_unlock(&rdev->pm.mutex); 409ce8f5370SAlex Deucher } else if (strncmp("profile", buf, strlen("profile")) == 0) { 410ce8f5370SAlex Deucher mutex_lock(&rdev->pm.mutex); 411ce8f5370SAlex Deucher /* disable dynpm */ 412ce8f5370SAlex Deucher rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 413ce8f5370SAlex Deucher rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 4143f53eb6fSRafael J. Wysocki rdev->pm.pm_method = PM_METHOD_PROFILE; 415ce8f5370SAlex Deucher mutex_unlock(&rdev->pm.mutex); 41632c87fcaSTejun Heo cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 417ce8f5370SAlex Deucher } else { 4181783e4bfSThomas Renninger count = -EINVAL; 419ce8f5370SAlex Deucher goto fail; 420d0d6cb81SRafał Miłecki } 421ce8f5370SAlex Deucher radeon_pm_compute_clocks(rdev); 422ce8f5370SAlex Deucher fail: 423ce8f5370SAlex Deucher return count; 424ce8f5370SAlex Deucher } 425ce8f5370SAlex Deucher 426ce8f5370SAlex Deucher static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 427ce8f5370SAlex Deucher static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 428ce8f5370SAlex Deucher 42921a8122aSAlex Deucher static ssize_t radeon_hwmon_show_temp(struct device *dev, 43021a8122aSAlex Deucher struct device_attribute *attr, 43121a8122aSAlex Deucher char *buf) 43221a8122aSAlex Deucher { 43321a8122aSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 43421a8122aSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 43520d391d7SAlex Deucher int temp; 43621a8122aSAlex Deucher 43721a8122aSAlex Deucher switch (rdev->pm.int_thermal_type) { 43821a8122aSAlex Deucher case THERMAL_TYPE_RV6XX: 43921a8122aSAlex Deucher temp = rv6xx_get_temp(rdev); 44021a8122aSAlex Deucher break; 44121a8122aSAlex Deucher case THERMAL_TYPE_RV770: 44221a8122aSAlex Deucher temp = rv770_get_temp(rdev); 44321a8122aSAlex Deucher break; 44421a8122aSAlex Deucher case THERMAL_TYPE_EVERGREEN: 4454fddba1fSAlex Deucher case THERMAL_TYPE_NI: 44621a8122aSAlex Deucher temp = evergreen_get_temp(rdev); 44721a8122aSAlex Deucher break; 448e33df25fSAlex Deucher case THERMAL_TYPE_SUMO: 449e33df25fSAlex Deucher temp = sumo_get_temp(rdev); 450e33df25fSAlex Deucher break; 4511bd47d2eSAlex Deucher case THERMAL_TYPE_SI: 4521bd47d2eSAlex Deucher temp = si_get_temp(rdev); 4531bd47d2eSAlex Deucher break; 45421a8122aSAlex Deucher default: 45521a8122aSAlex Deucher temp = 0; 45621a8122aSAlex Deucher break; 45721a8122aSAlex Deucher } 45821a8122aSAlex Deucher 45921a8122aSAlex Deucher return snprintf(buf, PAGE_SIZE, "%d\n", temp); 46021a8122aSAlex Deucher } 46121a8122aSAlex Deucher 46221a8122aSAlex 
static ssize_t radeon_hwmon_show_name(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "radeon\n");
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_name.dev_attr.attr,
	NULL
};

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
};

static int radeon_hwmon_init(struct radeon_device *rdev)
{
	int err = 0;

	rdev->pm.int_hwmon_dev = NULL;

	switch (rdev->pm.int_thermal_type) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_SI:
		/* No support for TN yet */
		if (rdev->family == CHIP_ARUBA)
			return err;
		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
			err = PTR_ERR(rdev->pm.int_hwmon_dev);
			dev_err(rdev->dev,
				"Unable to register hwmon device: %d\n", err);
			break;
		}
		dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
		err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
					 &hwmon_attrgroup);
		if (err) {
			dev_err(rdev->dev,
				"Unable to create hwmon sysfs file: %d\n", err);
			hwmon_device_unregister(rdev->dev);
		}
		break;
	default:
		break;
	}

	return err;
}

static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	if (rdev->pm.int_hwmon_dev) {
		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
	}
}

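/* Pause dynamic power management across a suspend: mark dynpm as suspended
 * and cancel the pending idle work.
 */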
void radeon_pm_suspend(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
	}
	mutex_unlock(&rdev->pm.mutex);

	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}

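/* One-time PM setup: defaults to the profile method, parses the power states
 * from the BIOS tables, registers the internal hwmon sensor if applicable,
 * and creates the sysfs and debugfs entries when more than one power state
 * is available.
 */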
int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
		/* set up the default clocks if the MC ucode is loaded */
		if ((rdev->family >= CHIP_BARTS) &&
		    (rdev->family <= CHIP_CAYMAN) &&
		    rdev->mc_fw) {
			if (rdev->pm.default_vddc)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
							SET_VOLTAGE_TYPE_ASIC_VDDC);
			if (rdev->pm.default_vddci)
				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
							SET_VOLTAGE_TYPE_ASIC_VDDCI);
			if (rdev->pm.default_sclk)
				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
			if (rdev->pm.default_mclk)
				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
		}
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

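/* Tear down PM: restore the default profile or clocks, cancel the dynpm idle
 * work, remove the sysfs files, free the power state array and unregister
 * the hwmon device.
 */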
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
	}

	if (rdev->pm.power_state)
		kfree(rdev->pm.power_state);

	radeon_hwmon_fini(rdev);
}

void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

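/* Returns true only if every active crtc is currently inside its vblank
 * period (or if no crtc is active), based on the reported scanout position.
 */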
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	int crtc, vpos, hpos, vbl_status;
	bool in_vbl = true;

	/* Iterate over all active crtc's. All crtc's must be in vblank,
	 * otherwise return in_vbl == false.
	 */
	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
		if (rdev->pm.active_crtcs & (1 << crtc)) {
			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
				in_vbl = false;
		}
	}

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
				 finish ? "exit" : "entry");
	return in_vbl;
}

static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
			    pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
	if (rdev->asic->pm.get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->pm.current_vddc)
		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
	if (rdev->asic->pm.get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}