17433874eSRafał Miłecki /* 27433874eSRafał Miłecki * Permission is hereby granted, free of charge, to any person obtaining a 37433874eSRafał Miłecki * copy of this software and associated documentation files (the "Software"), 47433874eSRafał Miłecki * to deal in the Software without restriction, including without limitation 57433874eSRafał Miłecki * the rights to use, copy, modify, merge, publish, distribute, sublicense, 67433874eSRafał Miłecki * and/or sell copies of the Software, and to permit persons to whom the 77433874eSRafał Miłecki * Software is furnished to do so, subject to the following conditions: 87433874eSRafał Miłecki * 97433874eSRafał Miłecki * The above copyright notice and this permission notice shall be included in 107433874eSRafał Miłecki * all copies or substantial portions of the Software. 117433874eSRafał Miłecki * 127433874eSRafał Miłecki * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 137433874eSRafał Miłecki * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 147433874eSRafał Miłecki * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 157433874eSRafał Miłecki * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 167433874eSRafał Miłecki * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 177433874eSRafał Miłecki * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 187433874eSRafał Miłecki * OTHER DEALINGS IN THE SOFTWARE. 197433874eSRafał Miłecki * 207433874eSRafał Miłecki * Authors: Rafał Miłecki <zajec5@gmail.com> 2156278a8eSAlex Deucher * Alex Deucher <alexdeucher@gmail.com> 227433874eSRafał Miłecki */ 237433874eSRafał Miłecki #include "drmP.h" 247433874eSRafał Miłecki #include "radeon.h" 25f735261bSDave Airlie #include "avivod.h" 267433874eSRafał Miłecki 27c913e23aSRafał Miłecki #define RADEON_IDLE_LOOP_MS 100 28c913e23aSRafał Miłecki #define RADEON_RECLOCK_DELAY_MS 200 2973a6d3fcSRafał Miłecki #define RADEON_WAIT_VBLANK_TIMEOUT 200 302031f77cSAlex Deucher #define RADEON_WAIT_IDLE_TIMEOUT 200 31c913e23aSRafał Miłecki 32c913e23aSRafał Miłecki static void radeon_pm_idle_work_handler(struct work_struct *work); 33c913e23aSRafał Miłecki static int radeon_debugfs_pm_init(struct radeon_device *rdev); 34c913e23aSRafał Miłecki 355876dd24SMatthew Garrett static void radeon_unmap_vram_bos(struct radeon_device *rdev) 365876dd24SMatthew Garrett { 375876dd24SMatthew Garrett struct radeon_bo *bo, *n; 385876dd24SMatthew Garrett 395876dd24SMatthew Garrett if (list_empty(&rdev->gem.objects)) 405876dd24SMatthew Garrett return; 415876dd24SMatthew Garrett 425876dd24SMatthew Garrett list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 435876dd24SMatthew Garrett if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 445876dd24SMatthew Garrett ttm_bo_unmap_virtual(&bo->tbo); 455876dd24SMatthew Garrett } 465876dd24SMatthew Garrett 475876dd24SMatthew Garrett if (rdev->gart.table.vram.robj) 485876dd24SMatthew Garrett ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo); 495876dd24SMatthew Garrett 505876dd24SMatthew Garrett if (rdev->stollen_vga_memory) 515876dd24SMatthew Garrett ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo); 525876dd24SMatthew Garrett 535876dd24SMatthew Garrett if (rdev->r600_blit.shader_obj) 545876dd24SMatthew Garrett ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo); 555876dd24SMatthew Garrett } 565876dd24SMatthew Garrett 572aba631cSMatthew Garrett static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch) 58a424816fSAlex Deucher { 592aba631cSMatthew Garrett int i; 602aba631cSMatthew Garrett 61*d7311171SAlex Deucher if (rdev->pm.state != PM_STATE_DISABLED) 62c37d230aSMatthew Garrett radeon_get_power_state(rdev, rdev->pm.planned_action); 63c37d230aSMatthew Garrett 64612e06ceSMatthew Garrett mutex_lock(&rdev->ddev->struct_mutex); 65612e06ceSMatthew Garrett mutex_lock(&rdev->vram_mutex); 66a424816fSAlex Deucher mutex_lock(&rdev->cp.mutex); 674f3218cbSAlex Deucher 684f3218cbSAlex Deucher /* gui idle int has issues on older chips it seems */ 694f3218cbSAlex Deucher if (rdev->family >= CHIP_R600) { 70a424816fSAlex Deucher /* wait for GPU idle */ 71a424816fSAlex Deucher rdev->pm.gui_idle = false; 72a424816fSAlex Deucher rdev->irq.gui_idle = true; 73a424816fSAlex Deucher radeon_irq_set(rdev); 74a424816fSAlex Deucher wait_event_interruptible_timeout( 75a424816fSAlex Deucher rdev->irq.idle_queue, rdev->pm.gui_idle, 76a424816fSAlex Deucher msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); 77a424816fSAlex Deucher rdev->irq.gui_idle = false; 78a424816fSAlex Deucher radeon_irq_set(rdev); 7901434b4bSMatthew Garrett } else { 8001434b4bSMatthew Garrett struct radeon_fence *fence; 8101434b4bSMatthew Garrett radeon_ring_alloc(rdev, 64); 8201434b4bSMatthew Garrett radeon_fence_create(rdev, &fence); 8301434b4bSMatthew Garrett radeon_fence_emit(rdev, fence); 8401434b4bSMatthew Garrett radeon_ring_commit(rdev); 8501434b4bSMatthew Garrett radeon_fence_wait(fence, false); 8601434b4bSMatthew Garrett radeon_fence_unref(&fence); 874f3218cbSAlex Deucher } 885876dd24SMatthew Garrett radeon_unmap_vram_bos(rdev); 895876dd24SMatthew Garrett 902aba631cSMatthew Garrett if (!static_switch) { 912aba631cSMatthew Garrett for (i = 0; i < rdev->num_crtc; i++) { 922aba631cSMatthew Garrett if (rdev->pm.active_crtcs & (1 << i)) { 932aba631cSMatthew Garrett rdev->pm.req_vblank |= (1 << i); 942aba631cSMatthew Garrett drm_vblank_get(rdev->ddev, i); 952aba631cSMatthew Garrett } 962aba631cSMatthew Garrett } 972aba631cSMatthew Garrett } 982aba631cSMatthew Garrett 992aba631cSMatthew Garrett radeon_set_power_state(rdev, static_switch); 1002aba631cSMatthew Garrett 1012aba631cSMatthew Garrett if (!static_switch) { 1022aba631cSMatthew Garrett for (i = 0; i < rdev->num_crtc; i++) { 1032aba631cSMatthew Garrett if (rdev->pm.req_vblank & (1 << i)) { 1042aba631cSMatthew Garrett rdev->pm.req_vblank &= ~(1 << i); 1052aba631cSMatthew Garrett drm_vblank_put(rdev->ddev, i); 1062aba631cSMatthew Garrett } 1072aba631cSMatthew Garrett } 1082aba631cSMatthew Garrett } 109a424816fSAlex Deucher 110a424816fSAlex Deucher /* update display watermarks based on new power state */ 111a424816fSAlex Deucher radeon_update_bandwidth_info(rdev); 112a424816fSAlex Deucher if (rdev->pm.active_crtc_count) 113a424816fSAlex Deucher radeon_bandwidth_update(rdev); 114a424816fSAlex Deucher 1152aba631cSMatthew Garrett rdev->pm.planned_action = PM_ACTION_NONE; 1162aba631cSMatthew Garrett 117a424816fSAlex Deucher mutex_unlock(&rdev->cp.mutex); 118612e06ceSMatthew Garrett mutex_unlock(&rdev->vram_mutex); 119612e06ceSMatthew Garrett mutex_unlock(&rdev->ddev->struct_mutex); 120a424816fSAlex Deucher } 121a424816fSAlex Deucher 122a424816fSAlex Deucher static ssize_t radeon_get_power_state_static(struct device *dev, 123a424816fSAlex Deucher struct device_attribute *attr, 124a424816fSAlex Deucher char *buf) 125a424816fSAlex Deucher { 126a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 127a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 128a424816fSAlex Deucher 129a424816fSAlex Deucher return snprintf(buf, PAGE_SIZE, "%d.%d\n", rdev->pm.current_power_state_index, 130a424816fSAlex Deucher rdev->pm.current_clock_mode_index); 131a424816fSAlex Deucher } 132a424816fSAlex Deucher 133a424816fSAlex Deucher static ssize_t radeon_set_power_state_static(struct device *dev, 134a424816fSAlex Deucher struct device_attribute *attr, 135a424816fSAlex Deucher const char *buf, 136a424816fSAlex Deucher size_t count) 137a424816fSAlex Deucher { 138a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 139a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 140a424816fSAlex Deucher int ps, cm; 141a424816fSAlex Deucher 142a424816fSAlex Deucher if (sscanf(buf, "%u.%u", &ps, &cm) != 2) { 143a424816fSAlex Deucher DRM_ERROR("Invalid power state!\n"); 144a424816fSAlex Deucher return count; 145a424816fSAlex Deucher } 146a424816fSAlex Deucher 147a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 148a424816fSAlex Deucher if ((ps >= 0) && (ps < rdev->pm.num_power_states) && 149a424816fSAlex Deucher (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) { 150*d7311171SAlex Deucher if ((rdev->pm.active_crtc_count > 0) && 151*d7311171SAlex Deucher (rdev->pm.power_state[ps].clock_info[cm].flags & RADEON_PM_MODE_NO_DISPLAY)) { 152*d7311171SAlex Deucher DRM_ERROR("Invalid power state for display: %d.%d\n", ps, cm); 153*d7311171SAlex Deucher } else if ((rdev->pm.active_crtc_count > 1) && 154*d7311171SAlex Deucher (rdev->pm.power_state[ps].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)) { 155a424816fSAlex Deucher DRM_ERROR("Invalid power state for multi-head: %d.%d\n", ps, cm); 156a424816fSAlex Deucher } else { 157a424816fSAlex Deucher /* disable dynpm */ 158a424816fSAlex Deucher rdev->pm.state = PM_STATE_DISABLED; 159a424816fSAlex Deucher rdev->pm.planned_action = PM_ACTION_NONE; 160a424816fSAlex Deucher rdev->pm.requested_power_state_index = ps; 161a424816fSAlex Deucher rdev->pm.requested_clock_mode_index = cm; 1622aba631cSMatthew Garrett radeon_pm_set_clocks(rdev, true); 163a424816fSAlex Deucher } 164a424816fSAlex Deucher } else 165a424816fSAlex Deucher DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm); 166a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 167a424816fSAlex Deucher 168a424816fSAlex Deucher return count; 169a424816fSAlex Deucher } 170a424816fSAlex Deucher 171a424816fSAlex Deucher static ssize_t radeon_get_dynpm(struct device *dev, 172a424816fSAlex Deucher struct device_attribute *attr, 173a424816fSAlex Deucher char *buf) 174a424816fSAlex Deucher { 175a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 176a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 177a424816fSAlex Deucher 178a424816fSAlex Deucher return snprintf(buf, PAGE_SIZE, "%s\n", 179a424816fSAlex Deucher (rdev->pm.state == PM_STATE_DISABLED) ? "disabled" : "enabled"); 180a424816fSAlex Deucher } 181a424816fSAlex Deucher 182a424816fSAlex Deucher static ssize_t radeon_set_dynpm(struct device *dev, 183a424816fSAlex Deucher struct device_attribute *attr, 184a424816fSAlex Deucher const char *buf, 185a424816fSAlex Deucher size_t count) 186a424816fSAlex Deucher { 187a424816fSAlex Deucher struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 188a424816fSAlex Deucher struct radeon_device *rdev = ddev->dev_private; 189a424816fSAlex Deucher int tmp = simple_strtoul(buf, NULL, 10); 190a424816fSAlex Deucher 191a424816fSAlex Deucher if (tmp == 0) { 192a424816fSAlex Deucher /* update power mode info */ 193a424816fSAlex Deucher radeon_pm_compute_clocks(rdev); 194a424816fSAlex Deucher /* disable dynpm */ 195a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 196a424816fSAlex Deucher rdev->pm.state = PM_STATE_DISABLED; 197a424816fSAlex Deucher rdev->pm.planned_action = PM_ACTION_NONE; 198a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 199a424816fSAlex Deucher DRM_INFO("radeon: dynamic power management disabled\n"); 200a424816fSAlex Deucher } else if (tmp == 1) { 201a424816fSAlex Deucher if (rdev->pm.num_power_states > 1) { 202a424816fSAlex Deucher /* enable dynpm */ 203a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 204a424816fSAlex Deucher rdev->pm.state = PM_STATE_PAUSED; 205a424816fSAlex Deucher rdev->pm.planned_action = PM_ACTION_DEFAULT; 206a424816fSAlex Deucher radeon_get_power_state(rdev, rdev->pm.planned_action); 207a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 208a424816fSAlex Deucher /* update power mode info */ 209a424816fSAlex Deucher radeon_pm_compute_clocks(rdev); 210a424816fSAlex Deucher DRM_INFO("radeon: dynamic power management enabled\n"); 211a424816fSAlex Deucher } else 212a424816fSAlex Deucher DRM_ERROR("dynpm not valid on this system\n"); 213a424816fSAlex Deucher } else 214a424816fSAlex Deucher DRM_ERROR("Invalid setting: %d\n", tmp); 215a424816fSAlex Deucher 216a424816fSAlex Deucher return count; 217a424816fSAlex Deucher } 218a424816fSAlex Deucher 219a424816fSAlex Deucher static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, radeon_get_power_state_static, radeon_set_power_state_static); 220a424816fSAlex Deucher static DEVICE_ATTR(dynpm, S_IRUGO | S_IWUSR, radeon_get_dynpm, radeon_set_dynpm); 221a424816fSAlex Deucher 222a424816fSAlex Deucher 223c913e23aSRafał Miłecki static const char *pm_state_names[4] = { 224c913e23aSRafał Miłecki "PM_STATE_DISABLED", 225c913e23aSRafał Miłecki "PM_STATE_MINIMUM", 226c913e23aSRafał Miłecki "PM_STATE_PAUSED", 227c913e23aSRafał Miłecki "PM_STATE_ACTIVE" 228c913e23aSRafał Miłecki }; 2297433874eSRafał Miłecki 2300ec0e74fSAlex Deucher static const char *pm_state_types[5] = { 231d91eeb78SAlex Deucher "", 2320ec0e74fSAlex Deucher "Powersave", 2330ec0e74fSAlex Deucher "Battery", 2340ec0e74fSAlex Deucher "Balanced", 2350ec0e74fSAlex Deucher "Performance", 2360ec0e74fSAlex Deucher }; 2370ec0e74fSAlex Deucher 23856278a8eSAlex Deucher static void radeon_print_power_mode_info(struct radeon_device *rdev) 23956278a8eSAlex Deucher { 24056278a8eSAlex Deucher int i, j; 24156278a8eSAlex Deucher bool is_default; 24256278a8eSAlex Deucher 24356278a8eSAlex Deucher DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); 24456278a8eSAlex Deucher for (i = 0; i < rdev->pm.num_power_states; i++) { 245a48b9b4eSAlex Deucher if (rdev->pm.default_power_state_index == i) 24656278a8eSAlex Deucher is_default = true; 24756278a8eSAlex Deucher else 24856278a8eSAlex Deucher is_default = false; 2490ec0e74fSAlex Deucher DRM_INFO("State %d %s %s\n", i, 2500ec0e74fSAlex Deucher pm_state_types[rdev->pm.power_state[i].type], 2510ec0e74fSAlex Deucher is_default ? "(default)" : ""); 25256278a8eSAlex Deucher if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) 25379daedc9SAlex Deucher DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].pcie_lanes); 254*d7311171SAlex Deucher if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 255a48b9b4eSAlex Deucher DRM_INFO("\tSingle display only\n"); 25656278a8eSAlex Deucher DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); 25756278a8eSAlex Deucher for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { 25856278a8eSAlex Deucher if (rdev->flags & RADEON_IS_IGP) 25956278a8eSAlex Deucher DRM_INFO("\t\t%d engine: %d\n", 26056278a8eSAlex Deucher j, 26156278a8eSAlex Deucher rdev->pm.power_state[i].clock_info[j].sclk * 10); 26256278a8eSAlex Deucher else 26356278a8eSAlex Deucher DRM_INFO("\t\t%d engine/memory: %d/%d\n", 26456278a8eSAlex Deucher j, 26556278a8eSAlex Deucher rdev->pm.power_state[i].clock_info[j].sclk * 10, 26656278a8eSAlex Deucher rdev->pm.power_state[i].clock_info[j].mclk * 10); 267*d7311171SAlex Deucher if (rdev->pm.power_state[i].clock_info[j].flags & RADEON_PM_MODE_NO_DISPLAY) 268*d7311171SAlex Deucher DRM_INFO("\t\tNo display only\n"); 26956278a8eSAlex Deucher } 27056278a8eSAlex Deucher } 27156278a8eSAlex Deucher } 27256278a8eSAlex Deucher 273bae6b562SAlex Deucher void radeon_sync_with_vblank(struct radeon_device *rdev) 274d0d6cb81SRafał Miłecki { 275d0d6cb81SRafał Miłecki if (rdev->pm.active_crtcs) { 276d0d6cb81SRafał Miłecki rdev->pm.vblank_sync = false; 277d0d6cb81SRafał Miłecki wait_event_timeout( 278d0d6cb81SRafał Miłecki rdev->irq.vblank_queue, rdev->pm.vblank_sync, 279d0d6cb81SRafał Miłecki msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); 280d0d6cb81SRafał Miłecki } 281d0d6cb81SRafał Miłecki } 282d0d6cb81SRafał Miłecki 2837433874eSRafał Miłecki int radeon_pm_init(struct radeon_device *rdev) 2847433874eSRafał Miłecki { 285c913e23aSRafał Miłecki rdev->pm.state = PM_STATE_DISABLED; 286c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_NONE; 287a48b9b4eSAlex Deucher rdev->pm.can_upclock = true; 288a48b9b4eSAlex Deucher rdev->pm.can_downclock = true; 289c913e23aSRafał Miłecki 29056278a8eSAlex Deucher if (rdev->bios) { 29156278a8eSAlex Deucher if (rdev->is_atom_bios) 29256278a8eSAlex Deucher radeon_atombios_get_power_modes(rdev); 29356278a8eSAlex Deucher else 29456278a8eSAlex Deucher radeon_combios_get_power_modes(rdev); 29556278a8eSAlex Deucher radeon_print_power_mode_info(rdev); 29656278a8eSAlex Deucher } 29756278a8eSAlex Deucher 2987433874eSRafał Miłecki if (radeon_debugfs_pm_init(rdev)) { 299c142c3e5SRafał Miłecki DRM_ERROR("Failed to register debugfs file for PM!\n"); 3007433874eSRafał Miłecki } 3017433874eSRafał Miłecki 302a424816fSAlex Deucher /* where's the best place to put this? */ 303a424816fSAlex Deucher device_create_file(rdev->dev, &dev_attr_power_state); 304a424816fSAlex Deucher device_create_file(rdev->dev, &dev_attr_dynpm); 305a424816fSAlex Deucher 306c913e23aSRafał Miłecki INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); 307c913e23aSRafał Miłecki 30890c39059SAlex Deucher if ((radeon_dynpm != -1 && radeon_dynpm) && (rdev->pm.num_power_states > 1)) { 309c913e23aSRafał Miłecki rdev->pm.state = PM_STATE_PAUSED; 310c913e23aSRafał Miłecki DRM_INFO("radeon: dynamic power management enabled\n"); 311c913e23aSRafał Miłecki } 312c913e23aSRafał Miłecki 313c913e23aSRafał Miłecki DRM_INFO("radeon: power management initialized\n"); 314c913e23aSRafał Miłecki 3157433874eSRafał Miłecki return 0; 3167433874eSRafał Miłecki } 3177433874eSRafał Miłecki 31829fb52caSAlex Deucher void radeon_pm_fini(struct radeon_device *rdev) 31929fb52caSAlex Deucher { 32058e21dffSAlex Deucher if (rdev->pm.state != PM_STATE_DISABLED) { 32158e21dffSAlex Deucher /* cancel work */ 32258e21dffSAlex Deucher cancel_delayed_work_sync(&rdev->pm.idle_work); 32358e21dffSAlex Deucher /* reset default clocks */ 32458e21dffSAlex Deucher rdev->pm.state = PM_STATE_DISABLED; 32558e21dffSAlex Deucher rdev->pm.planned_action = PM_ACTION_DEFAULT; 326*d7311171SAlex Deucher radeon_pm_set_clocks(rdev, true); 327a424816fSAlex Deucher } else if ((rdev->pm.current_power_state_index != 328a424816fSAlex Deucher rdev->pm.default_power_state_index) || 329a424816fSAlex Deucher (rdev->pm.current_clock_mode_index != 0)) { 330a424816fSAlex Deucher rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 331a424816fSAlex Deucher rdev->pm.requested_clock_mode_index = 0; 332a424816fSAlex Deucher mutex_lock(&rdev->pm.mutex); 3332aba631cSMatthew Garrett radeon_pm_set_clocks(rdev, true); 334a424816fSAlex Deucher mutex_unlock(&rdev->pm.mutex); 33558e21dffSAlex Deucher } 33658e21dffSAlex Deucher 337a424816fSAlex Deucher device_remove_file(rdev->dev, &dev_attr_power_state); 338a424816fSAlex Deucher device_remove_file(rdev->dev, &dev_attr_dynpm); 339a424816fSAlex Deucher 34029fb52caSAlex Deucher if (rdev->pm.i2c_bus) 34129fb52caSAlex Deucher radeon_i2c_destroy(rdev->pm.i2c_bus); 34229fb52caSAlex Deucher } 34329fb52caSAlex Deucher 344c913e23aSRafał Miłecki void radeon_pm_compute_clocks(struct radeon_device *rdev) 345c913e23aSRafał Miłecki { 346c913e23aSRafał Miłecki struct drm_device *ddev = rdev->ddev; 347a48b9b4eSAlex Deucher struct drm_crtc *crtc; 348c913e23aSRafał Miłecki struct radeon_crtc *radeon_crtc; 349c913e23aSRafał Miłecki 350c913e23aSRafał Miłecki mutex_lock(&rdev->pm.mutex); 351c913e23aSRafał Miłecki 352c913e23aSRafał Miłecki rdev->pm.active_crtcs = 0; 353a48b9b4eSAlex Deucher rdev->pm.active_crtc_count = 0; 354a48b9b4eSAlex Deucher list_for_each_entry(crtc, 355a48b9b4eSAlex Deucher &ddev->mode_config.crtc_list, head) { 356a48b9b4eSAlex Deucher radeon_crtc = to_radeon_crtc(crtc); 357a48b9b4eSAlex Deucher if (radeon_crtc->enabled) { 358c913e23aSRafał Miłecki rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 359a48b9b4eSAlex Deucher rdev->pm.active_crtc_count++; 360c913e23aSRafał Miłecki } 361c913e23aSRafał Miłecki } 362c913e23aSRafał Miłecki 363*d7311171SAlex Deucher if (rdev->pm.state == PM_STATE_DISABLED) { 364*d7311171SAlex Deucher mutex_unlock(&rdev->pm.mutex); 365*d7311171SAlex Deucher return; 366*d7311171SAlex Deucher } 367*d7311171SAlex Deucher 368*d7311171SAlex Deucher /* Note, radeon_pm_set_clocks is called with static_switch set 369*d7311171SAlex Deucher * to true since we always want to statically set the clocks, 370*d7311171SAlex Deucher * not wait for vbl. 371*d7311171SAlex Deucher */ 372a48b9b4eSAlex Deucher if (rdev->pm.active_crtc_count > 1) { 373c913e23aSRafał Miłecki if (rdev->pm.state == PM_STATE_ACTIVE) { 374c913e23aSRafał Miłecki cancel_delayed_work(&rdev->pm.idle_work); 375c913e23aSRafał Miłecki 376c913e23aSRafał Miłecki rdev->pm.state = PM_STATE_PAUSED; 377*d7311171SAlex Deucher rdev->pm.planned_action = PM_ACTION_DEFAULT; 378*d7311171SAlex Deucher radeon_pm_set_clocks(rdev, true); 379c913e23aSRafał Miłecki 380c913e23aSRafał Miłecki DRM_DEBUG("radeon: dynamic power management deactivated\n"); 381c913e23aSRafał Miłecki } 382a48b9b4eSAlex Deucher } else if (rdev->pm.active_crtc_count == 1) { 383c913e23aSRafał Miłecki /* TODO: Increase clocks if needed for current mode */ 384c913e23aSRafał Miłecki 385c913e23aSRafał Miłecki if (rdev->pm.state == PM_STATE_MINIMUM) { 386c913e23aSRafał Miłecki rdev->pm.state = PM_STATE_ACTIVE; 387c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_UPCLOCK; 388*d7311171SAlex Deucher radeon_pm_set_clocks(rdev, true); 389c913e23aSRafał Miłecki 390c913e23aSRafał Miłecki queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 391c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 392a48b9b4eSAlex Deucher } else if (rdev->pm.state == PM_STATE_PAUSED) { 393c913e23aSRafał Miłecki rdev->pm.state = PM_STATE_ACTIVE; 394c913e23aSRafał Miłecki queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 395c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 396c913e23aSRafał Miłecki DRM_DEBUG("radeon: dynamic power management activated\n"); 397c913e23aSRafał Miłecki } 398a48b9b4eSAlex Deucher } else { /* count == 0 */ 399c913e23aSRafał Miłecki if (rdev->pm.state != PM_STATE_MINIMUM) { 400c913e23aSRafał Miłecki cancel_delayed_work(&rdev->pm.idle_work); 401c913e23aSRafał Miłecki 402c913e23aSRafał Miłecki rdev->pm.state = PM_STATE_MINIMUM; 403c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_MINIMUM; 404*d7311171SAlex Deucher radeon_pm_set_clocks(rdev, true); 40573a6d3fcSRafał Miłecki } 406c913e23aSRafał Miłecki } 407c913e23aSRafał Miłecki 408c913e23aSRafał Miłecki mutex_unlock(&rdev->pm.mutex); 409c913e23aSRafał Miłecki } 410c913e23aSRafał Miłecki 411f81f2024SMatthew Garrett bool radeon_pm_in_vbl(struct radeon_device *rdev) 412f735261bSDave Airlie { 413539d2418SAlex Deucher u32 stat_crtc = 0, vbl = 0, position = 0; 414f735261bSDave Airlie bool in_vbl = true; 415f735261bSDave Airlie 416bae6b562SAlex Deucher if (ASIC_IS_DCE4(rdev)) { 417f735261bSDave Airlie if (rdev->pm.active_crtcs & (1 << 0)) { 418539d2418SAlex Deucher vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 419539d2418SAlex Deucher EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; 420539d2418SAlex Deucher position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 421539d2418SAlex Deucher EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; 422f735261bSDave Airlie } 423f735261bSDave Airlie if (rdev->pm.active_crtcs & (1 << 1)) { 424539d2418SAlex Deucher vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 425539d2418SAlex Deucher EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; 426539d2418SAlex Deucher position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 427539d2418SAlex Deucher EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; 428bae6b562SAlex Deucher } 429bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 2)) { 430539d2418SAlex Deucher vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 431539d2418SAlex Deucher EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; 432539d2418SAlex Deucher position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 433539d2418SAlex Deucher EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; 434bae6b562SAlex Deucher } 435bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 3)) { 436539d2418SAlex Deucher vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 437539d2418SAlex Deucher EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; 438539d2418SAlex Deucher position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 439539d2418SAlex Deucher EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; 440bae6b562SAlex Deucher } 441bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 4)) { 442539d2418SAlex Deucher vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 443539d2418SAlex Deucher EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; 444539d2418SAlex Deucher position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 445539d2418SAlex Deucher EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; 446bae6b562SAlex Deucher } 447bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 5)) { 448539d2418SAlex Deucher vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 449539d2418SAlex Deucher EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; 450539d2418SAlex Deucher position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 451539d2418SAlex Deucher EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; 452bae6b562SAlex Deucher } 453bae6b562SAlex Deucher } else if (ASIC_IS_AVIVO(rdev)) { 454bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 0)) { 455539d2418SAlex Deucher vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; 456539d2418SAlex Deucher position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; 457bae6b562SAlex Deucher } 458bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 1)) { 459539d2418SAlex Deucher vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; 460539d2418SAlex Deucher position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; 461bae6b562SAlex Deucher } 462539d2418SAlex Deucher if (position < vbl && position > 1) 463539d2418SAlex Deucher in_vbl = false; 464bae6b562SAlex Deucher } else { 465bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 0)) { 466bae6b562SAlex Deucher stat_crtc = RREG32(RADEON_CRTC_STATUS); 467bae6b562SAlex Deucher if (!(stat_crtc & 1)) 468bae6b562SAlex Deucher in_vbl = false; 469bae6b562SAlex Deucher } 470bae6b562SAlex Deucher if (rdev->pm.active_crtcs & (1 << 1)) { 471bae6b562SAlex Deucher stat_crtc = RREG32(RADEON_CRTC2_STATUS); 472bae6b562SAlex Deucher if (!(stat_crtc & 1)) 473f735261bSDave Airlie in_vbl = false; 474f735261bSDave Airlie } 475f735261bSDave Airlie } 476f81f2024SMatthew Garrett 477539d2418SAlex Deucher if (position < vbl && position > 1) 478539d2418SAlex Deucher in_vbl = false; 479539d2418SAlex Deucher 480f81f2024SMatthew Garrett return in_vbl; 481f81f2024SMatthew Garrett } 482f81f2024SMatthew Garrett 483f81f2024SMatthew Garrett bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 484f81f2024SMatthew Garrett { 485f81f2024SMatthew Garrett u32 stat_crtc = 0; 486f81f2024SMatthew Garrett bool in_vbl = radeon_pm_in_vbl(rdev); 487f81f2024SMatthew Garrett 488f735261bSDave Airlie if (in_vbl == false) 489bae6b562SAlex Deucher DRM_INFO("not in vbl for pm change %08x at %s\n", stat_crtc, 490bae6b562SAlex Deucher finish ? "exit" : "entry"); 491f735261bSDave Airlie return in_vbl; 492f735261bSDave Airlie } 493c913e23aSRafał Miłecki 494c913e23aSRafał Miłecki static void radeon_pm_idle_work_handler(struct work_struct *work) 495c913e23aSRafał Miłecki { 496c913e23aSRafał Miłecki struct radeon_device *rdev; 497d9932a32SMatthew Garrett int resched; 498c913e23aSRafał Miłecki rdev = container_of(work, struct radeon_device, 499c913e23aSRafał Miłecki pm.idle_work.work); 500c913e23aSRafał Miłecki 501d9932a32SMatthew Garrett resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 502c913e23aSRafał Miłecki mutex_lock(&rdev->pm.mutex); 50373a6d3fcSRafał Miłecki if (rdev->pm.state == PM_STATE_ACTIVE) { 504c913e23aSRafał Miłecki unsigned long irq_flags; 505c913e23aSRafał Miłecki int not_processed = 0; 506c913e23aSRafał Miłecki 507c913e23aSRafał Miłecki read_lock_irqsave(&rdev->fence_drv.lock, irq_flags); 508c913e23aSRafał Miłecki if (!list_empty(&rdev->fence_drv.emited)) { 509c913e23aSRafał Miłecki struct list_head *ptr; 510c913e23aSRafał Miłecki list_for_each(ptr, &rdev->fence_drv.emited) { 511c913e23aSRafał Miłecki /* count up to 3, that's enought info */ 512c913e23aSRafał Miłecki if (++not_processed >= 3) 513c913e23aSRafał Miłecki break; 514c913e23aSRafał Miłecki } 515c913e23aSRafał Miłecki } 516c913e23aSRafał Miłecki read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 517c913e23aSRafał Miłecki 518c913e23aSRafał Miłecki if (not_processed >= 3) { /* should upclock */ 519c913e23aSRafał Miłecki if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { 520c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_NONE; 521c913e23aSRafał Miłecki } else if (rdev->pm.planned_action == PM_ACTION_NONE && 522a48b9b4eSAlex Deucher rdev->pm.can_upclock) { 523c913e23aSRafał Miłecki rdev->pm.planned_action = 524c913e23aSRafał Miłecki PM_ACTION_UPCLOCK; 525c913e23aSRafał Miłecki rdev->pm.action_timeout = jiffies + 526c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 527c913e23aSRafał Miłecki } 528c913e23aSRafał Miłecki } else if (not_processed == 0) { /* should downclock */ 529c913e23aSRafał Miłecki if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { 530c913e23aSRafał Miłecki rdev->pm.planned_action = PM_ACTION_NONE; 531c913e23aSRafał Miłecki } else if (rdev->pm.planned_action == PM_ACTION_NONE && 532a48b9b4eSAlex Deucher rdev->pm.can_downclock) { 533c913e23aSRafał Miłecki rdev->pm.planned_action = 534c913e23aSRafał Miłecki PM_ACTION_DOWNCLOCK; 535c913e23aSRafał Miłecki rdev->pm.action_timeout = jiffies + 536c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 537c913e23aSRafał Miłecki } 538c913e23aSRafał Miłecki } 539c913e23aSRafał Miłecki 540*d7311171SAlex Deucher /* Note, radeon_pm_set_clocks is called with static_switch set 541*d7311171SAlex Deucher * to false since we want to wait for vbl to avoid flicker. 542*d7311171SAlex Deucher */ 543c913e23aSRafał Miłecki if (rdev->pm.planned_action != PM_ACTION_NONE && 544c913e23aSRafał Miłecki jiffies > rdev->pm.action_timeout) { 5452aba631cSMatthew Garrett radeon_pm_set_clocks(rdev, false); 546c913e23aSRafał Miłecki } 547c913e23aSRafał Miłecki } 548c913e23aSRafał Miłecki mutex_unlock(&rdev->pm.mutex); 549d9932a32SMatthew Garrett ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 550c913e23aSRafał Miłecki 551c913e23aSRafał Miłecki queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 552c913e23aSRafał Miłecki msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 553c913e23aSRafał Miłecki } 554c913e23aSRafał Miłecki 5557433874eSRafał Miłecki /* 5567433874eSRafał Miłecki * Debugfs info 5577433874eSRafał Miłecki */ 5587433874eSRafał Miłecki #if defined(CONFIG_DEBUG_FS) 5597433874eSRafał Miłecki 5607433874eSRafał Miłecki static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 5617433874eSRafał Miłecki { 5627433874eSRafał Miłecki struct drm_info_node *node = (struct drm_info_node *) m->private; 5637433874eSRafał Miłecki struct drm_device *dev = node->minor->dev; 5647433874eSRafał Miłecki struct radeon_device *rdev = dev->dev_private; 5657433874eSRafał Miłecki 566c913e23aSRafał Miłecki seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); 5676234077dSRafał Miłecki seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 5686234077dSRafał Miłecki seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 5696234077dSRafał Miłecki seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 5706234077dSRafał Miłecki if (rdev->asic->get_memory_clock) 5716234077dSRafał Miłecki seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 572aa5120d2SRafał Miłecki if (rdev->asic->get_pcie_lanes) 573aa5120d2SRafał Miłecki seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 5747433874eSRafał Miłecki 5757433874eSRafał Miłecki return 0; 5767433874eSRafał Miłecki } 5777433874eSRafał Miłecki 5787433874eSRafał Miłecki static struct drm_info_list radeon_pm_info_list[] = { 5797433874eSRafał Miłecki {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 5807433874eSRafał Miłecki }; 5817433874eSRafał Miłecki #endif 5827433874eSRafał Miłecki 583c913e23aSRafał Miłecki static int radeon_debugfs_pm_init(struct radeon_device *rdev) 5847433874eSRafał Miłecki { 5857433874eSRafał Miłecki #if defined(CONFIG_DEBUG_FS) 5867433874eSRafał Miłecki return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 5877433874eSRafał Miłecki #else 5887433874eSRafał Miłecki return 0; 5897433874eSRafał Miłecki #endif 5907433874eSRafał Miłecki } 591