/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects))
                return;

        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        ttm_bo_unmap_virtual(&bo->tbo);
        }

        if (rdev->gart.table.vram.robj)
                ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);

        if (rdev->stollen_vga_memory)
                ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);

        if (rdev->r600_blit.shader_obj)
                ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}

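/*
 * Switch the GPU to the requested power state.  On R600 and newer parts the
 * GUI idle interrupt is used to wait for the GPU to go idle, and VRAM buffer
 * objects are unmapped before the reclock.  For dynamic (non-static) switches
 * the planned action is first resolved into a power state and the switch is
 * synchronized with vblank on the active CRTCs.  Display watermarks are
 * recomputed for the new state afterwards.
 */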
static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
{
        int i;

        if (!static_switch)
                radeon_get_power_state(rdev, rdev->pm.planned_action);

        mutex_lock(&rdev->ddev->struct_mutex);
        mutex_lock(&rdev->vram_mutex);
        mutex_lock(&rdev->cp.mutex);

        /* gui idle int has issues on older chips it seems */
        if (rdev->family >= CHIP_R600) {
                /* wait for GPU idle */
                rdev->pm.gui_idle = false;
                rdev->irq.gui_idle = true;
                radeon_irq_set(rdev);
                wait_event_interruptible_timeout(
                        rdev->irq.idle_queue, rdev->pm.gui_idle,
                        msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
                rdev->irq.gui_idle = false;
                radeon_irq_set(rdev);
        }
        radeon_unmap_vram_bos(rdev);

        if (!static_switch) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.active_crtcs & (1 << i)) {
                                rdev->pm.req_vblank |= (1 << i);
                                drm_vblank_get(rdev->ddev, i);
                        }
                }
        }

        radeon_set_power_state(rdev, static_switch);

        if (!static_switch) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.req_vblank & (1 << i)) {
                                rdev->pm.req_vblank &= ~(1 << i);
                                drm_vblank_put(rdev->ddev, i);
                        }
                }
        }

        /* update display watermarks based on new power state */
        radeon_update_bandwidth_info(rdev);
        if (rdev->pm.active_crtc_count)
                radeon_bandwidth_update(rdev);

        rdev->pm.planned_action = PM_ACTION_NONE;

        mutex_unlock(&rdev->cp.mutex);
        mutex_unlock(&rdev->vram_mutex);
        mutex_unlock(&rdev->ddev->struct_mutex);
}

static ssize_t radeon_get_power_state_static(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%d.%d\n", rdev->pm.current_power_state_index,
                        rdev->pm.current_clock_mode_index);
}

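/*
 * sysfs store handler for the "power_state" attribute.  The new state is
 * written as "<power state index>.<clock mode index>"; a valid pair disables
 * dynpm and switches to the requested state immediately.
 */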
static ssize_t radeon_set_power_state_static(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf,
                                             size_t count)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;
        int ps, cm;

        if (sscanf(buf, "%d.%d", &ps, &cm) != 2) {
                DRM_ERROR("Invalid power state!\n");
                return count;
        }

        mutex_lock(&rdev->pm.mutex);
        if ((ps >= 0) && (ps < rdev->pm.num_power_states) &&
            (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) {
                if ((rdev->pm.active_crtc_count > 1) &&
                    (rdev->pm.power_state[ps].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)) {
                        DRM_ERROR("Invalid power state for multi-head: %d.%d\n", ps, cm);
                } else {
                        /* disable dynpm */
                        rdev->pm.state = PM_STATE_DISABLED;
                        rdev->pm.planned_action = PM_ACTION_NONE;
                        rdev->pm.requested_power_state_index = ps;
                        rdev->pm.requested_clock_mode_index = cm;
                        radeon_pm_set_clocks(rdev, true);
                }
        } else
                DRM_ERROR("Invalid power state: %d.%d\n", ps, cm);
        mutex_unlock(&rdev->pm.mutex);

        return count;
}

static ssize_t radeon_get_dynpm(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
        struct radeon_device *rdev = ddev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (rdev->pm.state == PM_STATE_DISABLED) ? "disabled" : "enabled");
"disabled" : "enabled"); 169a424816fSAlex Deucher } 170a424816fSAlex Deucher 171a424816fSAlex Deucher static ssize_t radeon_set_dynpm(struct device *dev, 172a424816fSAlex Deucher struct device_attribute *attr, 173a424816fSAlex Deucher const char *buf, 174a424816fSAlex Deucher size_t count) 175a424816fSAlex Deucher { 176a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 177a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 178a424816fSAlex Deucher int tmp = simple_strtoul(buf, NULL, 10); 179a424816fSAlex Deucher 180a424816fSAlex Deucher if (tmp == 0) { 181a424816fSAlex Deucher /* update power mode info */ 182a424816fSAlex Deucher radeon_pm_compute_clocks(rdev); 183a424816fSAlex Deucher /* disable dynpm */ 184a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 185a424816fSAlex Deucher rdev->pm.state = PM_STATE_DISABLED; 186a424816fSAlex Deucher rdev->pm.planned_action = PM_ACTION_NONE; 187a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 188a424816fSAlex Deucher DRM_INFO("radeon: dynamic power management disabled\n"); 189a424816fSAlex Deucher } else if (tmp == 1) { 190a424816fSAlex Deucher if (rdev->pm.num_power_states > 1) { 191a424816fSAlex Deucher /* enable dynpm */ 192a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 193a424816fSAlex Deucher rdev->pm.state = PM_STATE_PAUSED; 194a424816fSAlex Deucher rdev->pm.planned_action = PM_ACTION_DEFAULT; 195a424816fSAlex Deucher radeon_get_power_state(rdev, rdev->pm.planned_action); 196a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 197a424816fSAlex Deucher /* update power mode info */ 198a424816fSAlex Deucher radeon_pm_compute_clocks(rdev); 199a424816fSAlex Deucher DRM_INFO("radeon: dynamic power management enabled\n"); 200a424816fSAlex Deucher } else 201a424816fSAlex Deucher DRM_ERROR("dynpm not valid on this system\n"); 202a424816fSAlex Deucher } else 203a424816fSAlex Deucher DRM_ERROR("Invalid setting: %d\n", tmp); 204a424816fSAlex Deucher 205a424816fSAlex Deucher return count; 206a424816fSAlex Deucher } 207a424816fSAlex Deucher 208a424816fSAlex Deucher static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, radeon_get_power_state_static, radeon_set_power_state_static); 209a424816fSAlex Deucher static DEVICE_ATTR(dynpm, S_IRUGO | S_IWUSR, radeon_get_dynpm, radeon_set_dynpm); 210a424816fSAlex Deucher 211a424816fSAlex Deucher 212c913e23aSRafał Miłecki static const char *pm_state_names[4] = { 213c913e23aSRafał Miłecki "PM_STATE_DISABLED", 214c913e23aSRafał Miłecki "PM_STATE_MINIMUM", 215c913e23aSRafał Miłecki "PM_STATE_PAUSED", 216c913e23aSRafał Miłecki "PM_STATE_ACTIVE" 217c913e23aSRafał Miłecki }; 2187433874eSRafał Miłecki 2190ec0e74fSAlex Deucher static const char *pm_state_types[5] = { 220d91eeb78SAlex Deucher "", 2210ec0e74fSAlex Deucher "Powersave", 2220ec0e74fSAlex Deucher "Battery", 2230ec0e74fSAlex Deucher "Balanced", 2240ec0e74fSAlex Deucher "Performance", 2250ec0e74fSAlex Deucher }; 2260ec0e74fSAlex Deucher 22756278a8eSAlex Deucher static void radeon_print_power_mode_info(struct radeon_device *rdev) 22856278a8eSAlex Deucher { 22956278a8eSAlex Deucher int i, j; 23056278a8eSAlex Deucher bool is_default; 23156278a8eSAlex Deucher 23256278a8eSAlex Deucher DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); 23356278a8eSAlex Deucher for (i = 0; i < rdev->pm.num_power_states; i++) { 234a48b9b4eSAlex Deucher if (rdev->pm.default_power_state_index == i) 23556278a8eSAlex Deucher is_default = true; 23656278a8eSAlex Deucher else 23756278a8eSAlex Deucher is_default = false; 2380ec0e74fSAlex 
                DRM_INFO("State %d %s %s\n", i,
                         pm_state_types[rdev->pm.power_state[i].type],
                         is_default ? "(default)" : "");
                if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
                        DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].pcie_lanes);
                if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
                        DRM_INFO("\tSingle display only\n");
                DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
                for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
                        if (rdev->flags & RADEON_IS_IGP)
                                DRM_INFO("\t\t%d engine: %d\n",
                                         j,
                                         rdev->pm.power_state[i].clock_info[j].sclk * 10);
                        else
                                DRM_INFO("\t\t%d engine/memory: %d/%d\n",
                                         j,
                                         rdev->pm.power_state[i].clock_info[j].sclk * 10,
                                         rdev->pm.power_state[i].clock_info[j].mclk * 10);
                }
        }
}

void radeon_sync_with_vblank(struct radeon_device *rdev)
{
        if (rdev->pm.active_crtcs) {
                rdev->pm.vblank_sync = false;
                wait_event_timeout(
                        rdev->irq.vblank_queue, rdev->pm.vblank_sync,
                        msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
        }
}

int radeon_pm_init(struct radeon_device *rdev)
{
        rdev->pm.state = PM_STATE_DISABLED;
        rdev->pm.planned_action = PM_ACTION_NONE;
        rdev->pm.can_upclock = true;
        rdev->pm.can_downclock = true;

        if (rdev->bios) {
                if (rdev->is_atom_bios)
                        radeon_atombios_get_power_modes(rdev);
                else
                        radeon_combios_get_power_modes(rdev);
                radeon_print_power_mode_info(rdev);
        }

        if (radeon_debugfs_pm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for PM!\n");
        }

        /* where's the best place to put this? */
        device_create_file(rdev->dev, &dev_attr_power_state);
        device_create_file(rdev->dev, &dev_attr_dynpm);

        INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

        if ((radeon_dynpm != -1 && radeon_dynpm) && (rdev->pm.num_power_states > 1)) {
                rdev->pm.state = PM_STATE_PAUSED;
                DRM_INFO("radeon: dynamic power management enabled\n");
        }

        DRM_INFO("radeon: power management initialized\n");

        return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
        if (rdev->pm.state != PM_STATE_DISABLED) {
                /* cancel work */
                cancel_delayed_work_sync(&rdev->pm.idle_work);
                /* reset default clocks */
                rdev->pm.state = PM_STATE_DISABLED;
                rdev->pm.planned_action = PM_ACTION_DEFAULT;
                radeon_pm_set_clocks(rdev, false);
        } else if ((rdev->pm.current_power_state_index !=
                    rdev->pm.default_power_state_index) ||
                   (rdev->pm.current_clock_mode_index != 0)) {
                rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
                rdev->pm.requested_clock_mode_index = 0;
                mutex_lock(&rdev->pm.mutex);
                radeon_pm_set_clocks(rdev, true);
                mutex_unlock(&rdev->pm.mutex);
        }

        device_remove_file(rdev->dev, &dev_attr_power_state);
        device_remove_file(rdev->dev, &dev_attr_dynpm);

        if (rdev->pm.i2c_bus)
                radeon_i2c_destroy(rdev->pm.i2c_bus);
}

void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
        struct drm_device *ddev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;

        if (rdev->pm.state == PM_STATE_DISABLED)
                return;

        mutex_lock(&rdev->pm.mutex);

        rdev->pm.active_crtcs = 0;
        rdev->pm.active_crtc_count = 0;
        list_for_each_entry(crtc,
                &ddev->mode_config.crtc_list, head) {
                radeon_crtc = to_radeon_crtc(crtc);
                if (radeon_crtc->enabled) {
                        rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
                        rdev->pm.active_crtc_count++;
                }
        }

        if (rdev->pm.active_crtc_count > 1) {
                if (rdev->pm.state == PM_STATE_ACTIVE) {
                        cancel_delayed_work(&rdev->pm.idle_work);

                        rdev->pm.state = PM_STATE_PAUSED;
                        rdev->pm.planned_action = PM_ACTION_UPCLOCK;
                        radeon_pm_set_clocks(rdev, false);

                        DRM_DEBUG("radeon: dynamic power management deactivated\n");
                }
        } else if (rdev->pm.active_crtc_count == 1) {
                /* TODO: Increase clocks if needed for current mode */

                if (rdev->pm.state == PM_STATE_MINIMUM) {
                        rdev->pm.state = PM_STATE_ACTIVE;
                        rdev->pm.planned_action = PM_ACTION_UPCLOCK;
                        radeon_pm_set_clocks(rdev, false);

                        queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
                                           msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                } else if (rdev->pm.state == PM_STATE_PAUSED) {
                        rdev->pm.state = PM_STATE_ACTIVE;
                        queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
                                           msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                        DRM_DEBUG("radeon: dynamic power management activated\n");
                }
        } else { /* count == 0 */
                if (rdev->pm.state != PM_STATE_MINIMUM) {
                        cancel_delayed_work(&rdev->pm.idle_work);

                        rdev->pm.state = PM_STATE_MINIMUM;
                        rdev->pm.planned_action = PM_ACTION_MINIMUM;
                        radeon_pm_set_clocks(rdev, false);
                }
        }

        mutex_unlock(&rdev->pm.mutex);
}

bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
        u32 stat_crtc = 0, vbl = 0, position = 0;
        bool in_vbl = true;

        if (ASIC_IS_DCE4(rdev)) {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 2)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 3)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 4)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 5)) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
                }
        } else if (ASIC_IS_AVIVO(rdev)) {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
                        position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
                        position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
                }
                if (position < vbl && position > 1)
                        in_vbl = false;
        } else {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        stat_crtc = RREG32(RADEON_CRTC_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        stat_crtc = RREG32(RADEON_CRTC2_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
        }

        if (position < vbl && position > 1)
                in_vbl = false;

        return in_vbl;
}

bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
        u32 stat_crtc = 0;
        bool in_vbl = radeon_pm_in_vbl(rdev);

        if (in_vbl == false)
                DRM_INFO("not in vbl for pm change %08x at %s\n", stat_crtc,
                         finish ? "exit" : "entry");
        return in_vbl;
}

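/*
 * Periodic dynpm worker.  Samples the emitted fence list as a rough GPU load
 * estimate: three or more unprocessed fences plan an upclock, an empty list
 * plans a downclock.  A planned action is only applied once
 * RADEON_RECLOCK_DELAY_MS has elapsed, and the work requeues itself every
 * RADEON_IDLE_LOOP_MS.
 */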
"exit" : "entry"); 472f735261bSDave Airlie return in_vbl; 473f735261bSDave Airlie } 474c913e23aSRafał Miłecki 475c913e23aSRafał Miłecki static void radeon_pm_idle_work_handler(struct work_struct *work) 476c913e23aSRafał Miłecki { 477c913e23aSRafał Miłecki struct radeon_device *rdev; 478d9932a32SMatthew Garrett int resched; 479c913e23aSRafał Miłecki rdev = container_of(work, struct radeon_device, 480c913e23aSRafał Miłecki pm.idle_work.work); 481c913e23aSRafał Miłecki 482d9932a32SMatthew Garrett resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 483c913e23aSRafał Miłecki mutex_lock(&rdev->pm.mutex); 48473a6d3fcSRafał Miłecki if (rdev->pm.state == PM_STATE_ACTIVE) { 485c913e23aSRafał Miłecki unsigned long irq_flags; 486c913e23aSRafał Miłecki int not_processed = 0; 487c913e23aSRafał Miłecki 488c913e23aSRafał Miłecki read_lock_irqsave(&rdev->fence_drv.lock, irq_flags); 489c913e23aSRafał Miłecki if (!list_empty(&rdev->fence_drv.emited)) { 490c913e23aSRafał Miłecki struct list_head *ptr; 491c913e23aSRafał Miłecki list_for_each(ptr, &rdev->fence_drv.emited) { 492c913e23aSRafał Miłecki /* count up to 3, that's enought info */ 493c913e23aSRafał Miłecki if (++not_processed >= 3) 494c913e23aSRafał Miłecki break; 495c913e23aSRafał Miłecki } 496c913e23aSRafał Miłecki } 497c913e23aSRafał Miłecki read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 498c913e23aSRafał Miłecki 499c913e23aSRafał Miłecki if (not_processed >= 3) { /* should upclock */ 500c913e23aSRafał Miłecki if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { 501c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_NONE; 502c913e23aSRafał Miłecki } else if (rdev->pm.planned_action == PM_ACTION_NONE && 503a48b9b4eSAlex Deucher rdev->pm.can_upclock) { 504c913e23aSRafał Miłecki rdev->pm.planned_action = 505c913e23aSRafał Miłecki PM_ACTION_UPCLOCK; 506c913e23aSRafał Miłecki rdev->pm.action_timeout = jiffies + 507c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 508c913e23aSRafał Miłecki } 509c913e23aSRafał Miłecki } else if (not_processed == 0) { /* should downclock */ 510c913e23aSRafał Miłecki if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { 511c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_NONE; 512c913e23aSRafał Miłecki } else if (rdev->pm.planned_action == PM_ACTION_NONE && 513a48b9b4eSAlex Deucher rdev->pm.can_downclock) { 514c913e23aSRafał Miłecki rdev->pm.planned_action = 515c913e23aSRafał Miłecki PM_ACTION_DOWNCLOCK; 516c913e23aSRafał Miłecki rdev->pm.action_timeout = jiffies + 517c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 518c913e23aSRafał Miłecki } 519c913e23aSRafał Miłecki } 520c913e23aSRafał Miłecki 521c913e23aSRafał Miłecki if (rdev->pm.planned_action != PM_ACTION_NONE && 522c913e23aSRafał Miłecki jiffies > rdev->pm.action_timeout) { 5232aba631cSMatthew Garrett radeon_pm_set_clocks(rdev, false); 524c913e23aSRafał Miłecki } 525c913e23aSRafał Miłecki } 526c913e23aSRafał Miłecki mutex_unlock(&rdev->pm.mutex); 527d9932a32SMatthew Garrett ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 528c913e23aSRafał Miłecki 529c913e23aSRafał Miłecki queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 530c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 531c913e23aSRafał Miłecki } 532c913e23aSRafał Miłecki 5337433874eSRafał Miłecki /* 5347433874eSRafał Miłecki * Debugfs info 5357433874eSRafał Miłecki */ 5367433874eSRafał Miłecki #if defined(CONFIG_DEBUG_FS) 5377433874eSRafał Miłecki 5387433874eSRafał Miłecki static int radeon_debugfs_pm_info(struct seq_file 
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
        seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
        seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
        seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
        if (rdev->asic->get_memory_clock)
                seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
        if (rdev->asic->get_pcie_lanes)
                seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

        return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
        {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
        return 0;
#endif
}