1e098bc96SEvan Quan /* 2e098bc96SEvan Quan * Copyright 2011 Advanced Micro Devices, Inc. 3e098bc96SEvan Quan * 4e098bc96SEvan Quan * Permission is hereby granted, free of charge, to any person obtaining a 5e098bc96SEvan Quan * copy of this software and associated documentation files (the "Software"), 6e098bc96SEvan Quan * to deal in the Software without restriction, including without limitation 7e098bc96SEvan Quan * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8e098bc96SEvan Quan * and/or sell copies of the Software, and to permit persons to whom the 9e098bc96SEvan Quan * Software is furnished to do so, subject to the following conditions: 10e098bc96SEvan Quan * 11e098bc96SEvan Quan * The above copyright notice and this permission notice shall be included in 12e098bc96SEvan Quan * all copies or substantial portions of the Software. 13e098bc96SEvan Quan * 14e098bc96SEvan Quan * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15e098bc96SEvan Quan * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16e098bc96SEvan Quan * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17e098bc96SEvan Quan * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18e098bc96SEvan Quan * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19e098bc96SEvan Quan * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20e098bc96SEvan Quan * OTHER DEALINGS IN THE SOFTWARE. 
21e098bc96SEvan Quan * 22e098bc96SEvan Quan * Authors: Alex Deucher 23e098bc96SEvan Quan */ 24e098bc96SEvan Quan 25e098bc96SEvan Quan #include "amdgpu.h" 26e098bc96SEvan Quan #include "amdgpu_atombios.h" 27e098bc96SEvan Quan #include "amdgpu_i2c.h" 28e098bc96SEvan Quan #include "amdgpu_dpm.h" 29e098bc96SEvan Quan #include "atom.h" 30e098bc96SEvan Quan #include "amd_pcie.h" 31e098bc96SEvan Quan #include "amdgpu_display.h" 32e098bc96SEvan Quan #include "hwmgr.h" 33e098bc96SEvan Quan #include <linux/power_supply.h> 34e098bc96SEvan Quan 35e098bc96SEvan Quan #define WIDTH_4K 3840 36e098bc96SEvan Quan 37e098bc96SEvan Quan void amdgpu_dpm_print_class_info(u32 class, u32 class2) 38e098bc96SEvan Quan { 39e098bc96SEvan Quan const char *s; 40e098bc96SEvan Quan 41e098bc96SEvan Quan switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 42e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_NONE: 43e098bc96SEvan Quan default: 44e098bc96SEvan Quan s = "none"; 45e098bc96SEvan Quan break; 46e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 47e098bc96SEvan Quan s = "battery"; 48e098bc96SEvan Quan break; 49e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: 50e098bc96SEvan Quan s = "balanced"; 51e098bc96SEvan Quan break; 52e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 53e098bc96SEvan Quan s = "performance"; 54e098bc96SEvan Quan break; 55e098bc96SEvan Quan } 56e098bc96SEvan Quan printk("\tui class: %s\n", s); 57e098bc96SEvan Quan printk("\tinternal class:"); 58e098bc96SEvan Quan if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && 59e098bc96SEvan Quan (class2 == 0)) 60e098bc96SEvan Quan pr_cont(" none"); 61e098bc96SEvan Quan else { 62e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) 63e098bc96SEvan Quan pr_cont(" boot"); 64e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 65e098bc96SEvan Quan pr_cont(" thermal"); 66e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) 67e098bc96SEvan 
Quan pr_cont(" limited_pwr"); 68e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_REST) 69e098bc96SEvan Quan pr_cont(" rest"); 70e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) 71e098bc96SEvan Quan pr_cont(" forced"); 72e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 73e098bc96SEvan Quan pr_cont(" 3d_perf"); 74e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) 75e098bc96SEvan Quan pr_cont(" ovrdrv"); 76e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 77e098bc96SEvan Quan pr_cont(" uvd"); 78e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) 79e098bc96SEvan Quan pr_cont(" 3d_low"); 80e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) 81e098bc96SEvan Quan pr_cont(" acpi"); 82e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 83e098bc96SEvan Quan pr_cont(" uvd_hd2"); 84e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 85e098bc96SEvan Quan pr_cont(" uvd_hd"); 86e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 87e098bc96SEvan Quan pr_cont(" uvd_sd"); 88e098bc96SEvan Quan if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) 89e098bc96SEvan Quan pr_cont(" limited_pwr2"); 90e098bc96SEvan Quan if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 91e098bc96SEvan Quan pr_cont(" ulv"); 92e098bc96SEvan Quan if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 93e098bc96SEvan Quan pr_cont(" uvd_mvc"); 94e098bc96SEvan Quan } 95e098bc96SEvan Quan pr_cont("\n"); 96e098bc96SEvan Quan } 97e098bc96SEvan Quan 98e098bc96SEvan Quan void amdgpu_dpm_print_cap_info(u32 caps) 99e098bc96SEvan Quan { 100e098bc96SEvan Quan printk("\tcaps:"); 101e098bc96SEvan Quan if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) 102e098bc96SEvan Quan pr_cont(" single_disp"); 103e098bc96SEvan Quan if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) 104e098bc96SEvan Quan pr_cont(" video"); 105e098bc96SEvan Quan if (caps & ATOM_PPLIB_DISALLOW_ON_DC) 106e098bc96SEvan Quan 
pr_cont(" no_dc"); 107e098bc96SEvan Quan pr_cont("\n"); 108e098bc96SEvan Quan } 109e098bc96SEvan Quan 110e098bc96SEvan Quan void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, 111e098bc96SEvan Quan struct amdgpu_ps *rps) 112e098bc96SEvan Quan { 113e098bc96SEvan Quan printk("\tstatus:"); 114e098bc96SEvan Quan if (rps == adev->pm.dpm.current_ps) 115e098bc96SEvan Quan pr_cont(" c"); 116e098bc96SEvan Quan if (rps == adev->pm.dpm.requested_ps) 117e098bc96SEvan Quan pr_cont(" r"); 118e098bc96SEvan Quan if (rps == adev->pm.dpm.boot_ps) 119e098bc96SEvan Quan pr_cont(" b"); 120e098bc96SEvan Quan pr_cont("\n"); 121e098bc96SEvan Quan } 122e098bc96SEvan Quan 123e098bc96SEvan Quan void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 124e098bc96SEvan Quan { 125e098bc96SEvan Quan struct drm_device *ddev = adev->ddev; 126e098bc96SEvan Quan struct drm_crtc *crtc; 127e098bc96SEvan Quan struct amdgpu_crtc *amdgpu_crtc; 128e098bc96SEvan Quan 129e098bc96SEvan Quan adev->pm.dpm.new_active_crtcs = 0; 130e098bc96SEvan Quan adev->pm.dpm.new_active_crtc_count = 0; 131e098bc96SEvan Quan if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 132e098bc96SEvan Quan list_for_each_entry(crtc, 133e098bc96SEvan Quan &ddev->mode_config.crtc_list, head) { 134e098bc96SEvan Quan amdgpu_crtc = to_amdgpu_crtc(crtc); 135e098bc96SEvan Quan if (amdgpu_crtc->enabled) { 136e098bc96SEvan Quan adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 137e098bc96SEvan Quan adev->pm.dpm.new_active_crtc_count++; 138e098bc96SEvan Quan } 139e098bc96SEvan Quan } 140e098bc96SEvan Quan } 141e098bc96SEvan Quan } 142e098bc96SEvan Quan 143e098bc96SEvan Quan 144e098bc96SEvan Quan u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 145e098bc96SEvan Quan { 146e098bc96SEvan Quan struct drm_device *dev = adev->ddev; 147e098bc96SEvan Quan struct drm_crtc *crtc; 148e098bc96SEvan Quan struct amdgpu_crtc *amdgpu_crtc; 149e098bc96SEvan Quan u32 vblank_in_pixels; 150e098bc96SEvan 
Quan u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 151e098bc96SEvan Quan 152e098bc96SEvan Quan if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 153e098bc96SEvan Quan list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 154e098bc96SEvan Quan amdgpu_crtc = to_amdgpu_crtc(crtc); 155e098bc96SEvan Quan if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 156e098bc96SEvan Quan vblank_in_pixels = 157e098bc96SEvan Quan amdgpu_crtc->hw_mode.crtc_htotal * 158e098bc96SEvan Quan (amdgpu_crtc->hw_mode.crtc_vblank_end - 159e098bc96SEvan Quan amdgpu_crtc->hw_mode.crtc_vdisplay + 160e098bc96SEvan Quan (amdgpu_crtc->v_border * 2)); 161e098bc96SEvan Quan 162e098bc96SEvan Quan vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 163e098bc96SEvan Quan break; 164e098bc96SEvan Quan } 165e098bc96SEvan Quan } 166e098bc96SEvan Quan } 167e098bc96SEvan Quan 168e098bc96SEvan Quan return vblank_time_us; 169e098bc96SEvan Quan } 170e098bc96SEvan Quan 171e098bc96SEvan Quan u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 172e098bc96SEvan Quan { 173e098bc96SEvan Quan struct drm_device *dev = adev->ddev; 174e098bc96SEvan Quan struct drm_crtc *crtc; 175e098bc96SEvan Quan struct amdgpu_crtc *amdgpu_crtc; 176e098bc96SEvan Quan u32 vrefresh = 0; 177e098bc96SEvan Quan 178e098bc96SEvan Quan if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 179e098bc96SEvan Quan list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 180e098bc96SEvan Quan amdgpu_crtc = to_amdgpu_crtc(crtc); 181e098bc96SEvan Quan if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 182e098bc96SEvan Quan vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 183e098bc96SEvan Quan break; 184e098bc96SEvan Quan } 185e098bc96SEvan Quan } 186e098bc96SEvan Quan } 187e098bc96SEvan Quan 188e098bc96SEvan Quan return vrefresh; 189e098bc96SEvan Quan } 190e098bc96SEvan Quan 
191e098bc96SEvan Quan bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) 192e098bc96SEvan Quan { 193e098bc96SEvan Quan switch (sensor) { 194e098bc96SEvan Quan case THERMAL_TYPE_RV6XX: 195e098bc96SEvan Quan case THERMAL_TYPE_RV770: 196e098bc96SEvan Quan case THERMAL_TYPE_EVERGREEN: 197e098bc96SEvan Quan case THERMAL_TYPE_SUMO: 198e098bc96SEvan Quan case THERMAL_TYPE_NI: 199e098bc96SEvan Quan case THERMAL_TYPE_SI: 200e098bc96SEvan Quan case THERMAL_TYPE_CI: 201e098bc96SEvan Quan case THERMAL_TYPE_KV: 202e098bc96SEvan Quan return true; 203e098bc96SEvan Quan case THERMAL_TYPE_ADT7473_WITH_INTERNAL: 204e098bc96SEvan Quan case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 205e098bc96SEvan Quan return false; /* need special handling */ 206e098bc96SEvan Quan case THERMAL_TYPE_NONE: 207e098bc96SEvan Quan case THERMAL_TYPE_EXTERNAL: 208e098bc96SEvan Quan case THERMAL_TYPE_EXTERNAL_GPIO: 209e098bc96SEvan Quan default: 210e098bc96SEvan Quan return false; 211e098bc96SEvan Quan } 212e098bc96SEvan Quan } 213e098bc96SEvan Quan 214e098bc96SEvan Quan union power_info { 215e098bc96SEvan Quan struct _ATOM_POWERPLAY_INFO info; 216e098bc96SEvan Quan struct _ATOM_POWERPLAY_INFO_V2 info_2; 217e098bc96SEvan Quan struct _ATOM_POWERPLAY_INFO_V3 info_3; 218e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 219e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 220e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 221e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; 222e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; 223e098bc96SEvan Quan }; 224e098bc96SEvan Quan 225e098bc96SEvan Quan union fan_info { 226e098bc96SEvan Quan struct _ATOM_PPLIB_FANTABLE fan; 227e098bc96SEvan Quan struct _ATOM_PPLIB_FANTABLE2 fan2; 228e098bc96SEvan Quan struct _ATOM_PPLIB_FANTABLE3 fan3; 229e098bc96SEvan Quan }; 230e098bc96SEvan Quan 231e098bc96SEvan Quan static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table 
*amdgpu_table, 232e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) 233e098bc96SEvan Quan { 234e098bc96SEvan Quan u32 size = atom_table->ucNumEntries * 235e098bc96SEvan Quan sizeof(struct amdgpu_clock_voltage_dependency_entry); 236e098bc96SEvan Quan int i; 237e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; 238e098bc96SEvan Quan 239e098bc96SEvan Quan amdgpu_table->entries = kzalloc(size, GFP_KERNEL); 240e098bc96SEvan Quan if (!amdgpu_table->entries) 241e098bc96SEvan Quan return -ENOMEM; 242e098bc96SEvan Quan 243e098bc96SEvan Quan entry = &atom_table->entries[0]; 244e098bc96SEvan Quan for (i = 0; i < atom_table->ucNumEntries; i++) { 245e098bc96SEvan Quan amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | 246e098bc96SEvan Quan (entry->ucClockHigh << 16); 247e098bc96SEvan Quan amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); 248e098bc96SEvan Quan entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) 249e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); 250e098bc96SEvan Quan } 251e098bc96SEvan Quan amdgpu_table->count = atom_table->ucNumEntries; 252e098bc96SEvan Quan 253e098bc96SEvan Quan return 0; 254e098bc96SEvan Quan } 255e098bc96SEvan Quan 256e098bc96SEvan Quan int amdgpu_get_platform_caps(struct amdgpu_device *adev) 257e098bc96SEvan Quan { 258e098bc96SEvan Quan struct amdgpu_mode_info *mode_info = &adev->mode_info; 259e098bc96SEvan Quan union power_info *power_info; 260e098bc96SEvan Quan int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 261e098bc96SEvan Quan u16 data_offset; 262e098bc96SEvan Quan u8 frev, crev; 263e098bc96SEvan Quan 264e098bc96SEvan Quan if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 265e098bc96SEvan Quan &frev, &crev, &data_offset)) 266e098bc96SEvan Quan return -EINVAL; 267e098bc96SEvan Quan power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 268e098bc96SEvan Quan 269e098bc96SEvan 
Quan adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); 270e098bc96SEvan Quan adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 271e098bc96SEvan Quan adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 272e098bc96SEvan Quan 273e098bc96SEvan Quan return 0; 274e098bc96SEvan Quan } 275e098bc96SEvan Quan 276e098bc96SEvan Quan /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ 277e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 278e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 279e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 280e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 281e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 282e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 283e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 284e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 285e098bc96SEvan Quan 286e098bc96SEvan Quan int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) 287e098bc96SEvan Quan { 288e098bc96SEvan Quan struct amdgpu_mode_info *mode_info = &adev->mode_info; 289e098bc96SEvan Quan union power_info *power_info; 290e098bc96SEvan Quan union fan_info *fan_info; 291e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; 292e098bc96SEvan Quan int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 293e098bc96SEvan Quan u16 data_offset; 294e098bc96SEvan Quan u8 frev, crev; 295e098bc96SEvan Quan int ret, i; 296e098bc96SEvan Quan 297e098bc96SEvan Quan if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 298e098bc96SEvan Quan &frev, &crev, &data_offset)) 299e098bc96SEvan Quan return -EINVAL; 300e098bc96SEvan Quan power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 301e098bc96SEvan Quan 302e098bc96SEvan Quan /* fan table */ 303e098bc96SEvan Quan if 
(le16_to_cpu(power_info->pplib.usTableSize) >= 304e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 305e098bc96SEvan Quan if (power_info->pplib3.usFanTableOffset) { 306e098bc96SEvan Quan fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + 307e098bc96SEvan Quan le16_to_cpu(power_info->pplib3.usFanTableOffset)); 308e098bc96SEvan Quan adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; 309e098bc96SEvan Quan adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); 310e098bc96SEvan Quan adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); 311e098bc96SEvan Quan adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); 312e098bc96SEvan Quan adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); 313e098bc96SEvan Quan adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); 314e098bc96SEvan Quan adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); 315e098bc96SEvan Quan if (fan_info->fan.ucFanTableFormat >= 2) 316e098bc96SEvan Quan adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); 317e098bc96SEvan Quan else 318e098bc96SEvan Quan adev->pm.dpm.fan.t_max = 10900; 319e098bc96SEvan Quan adev->pm.dpm.fan.cycle_delay = 100000; 320e098bc96SEvan Quan if (fan_info->fan.ucFanTableFormat >= 3) { 321e098bc96SEvan Quan adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; 322e098bc96SEvan Quan adev->pm.dpm.fan.default_max_fan_pwm = 323e098bc96SEvan Quan le16_to_cpu(fan_info->fan3.usFanPWMMax); 324e098bc96SEvan Quan adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; 325e098bc96SEvan Quan adev->pm.dpm.fan.fan_output_sensitivity = 326e098bc96SEvan Quan le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); 327e098bc96SEvan Quan } 328e098bc96SEvan Quan adev->pm.dpm.fan.ucode_fan_control = true; 329e098bc96SEvan Quan } 330e098bc96SEvan Quan } 331e098bc96SEvan Quan 332e098bc96SEvan Quan /* clock dependancy tables, shedding tables */ 333e098bc96SEvan Quan if 
(le16_to_cpu(power_info->pplib.usTableSize) >= 334e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { 335e098bc96SEvan Quan if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { 336e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 337e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 338e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); 339e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 340e098bc96SEvan Quan dep_table); 341e098bc96SEvan Quan if (ret) { 342e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 343e098bc96SEvan Quan return ret; 344e098bc96SEvan Quan } 345e098bc96SEvan Quan } 346e098bc96SEvan Quan if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { 347e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 348e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 349e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); 350e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 351e098bc96SEvan Quan dep_table); 352e098bc96SEvan Quan if (ret) { 353e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 354e098bc96SEvan Quan return ret; 355e098bc96SEvan Quan } 356e098bc96SEvan Quan } 357e098bc96SEvan Quan if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { 358e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 359e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 360e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); 361e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 362e098bc96SEvan Quan dep_table); 363e098bc96SEvan Quan if (ret) { 364e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 365e098bc96SEvan Quan return ret; 366e098bc96SEvan Quan } 
367e098bc96SEvan Quan } 368e098bc96SEvan Quan if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { 369e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 370e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 371e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); 372e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 373e098bc96SEvan Quan dep_table); 374e098bc96SEvan Quan if (ret) { 375e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 376e098bc96SEvan Quan return ret; 377e098bc96SEvan Quan } 378e098bc96SEvan Quan } 379e098bc96SEvan Quan if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { 380e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = 381e098bc96SEvan Quan (ATOM_PPLIB_Clock_Voltage_Limit_Table *) 382e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 383e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); 384e098bc96SEvan Quan if (clk_v->ucNumEntries) { 385e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = 386e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usSclkLow) | 387e098bc96SEvan Quan (clk_v->entries[0].ucSclkHigh << 16); 388e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = 389e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usMclkLow) | 390e098bc96SEvan Quan (clk_v->entries[0].ucMclkHigh << 16); 391e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = 392e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usVddc); 393e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = 394e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usVddci); 395e098bc96SEvan Quan } 396e098bc96SEvan Quan } 397e098bc96SEvan Quan if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { 398e098bc96SEvan Quan ATOM_PPLIB_PhaseSheddingLimits_Table *psl = 399e098bc96SEvan Quan (ATOM_PPLIB_PhaseSheddingLimits_Table 
*) 400e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 401e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); 402e098bc96SEvan Quan ATOM_PPLIB_PhaseSheddingLimits_Record *entry; 403e098bc96SEvan Quan 404e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 405e098bc96SEvan Quan kcalloc(psl->ucNumEntries, 406e098bc96SEvan Quan sizeof(struct amdgpu_phase_shedding_limits_entry), 407e098bc96SEvan Quan GFP_KERNEL); 408e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { 409e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 410e098bc96SEvan Quan return -ENOMEM; 411e098bc96SEvan Quan } 412e098bc96SEvan Quan 413e098bc96SEvan Quan entry = &psl->entries[0]; 414e098bc96SEvan Quan for (i = 0; i < psl->ucNumEntries; i++) { 415e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = 416e098bc96SEvan Quan le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); 417e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = 418e098bc96SEvan Quan le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); 419e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = 420e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 421e098bc96SEvan Quan entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) 422e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); 423e098bc96SEvan Quan } 424e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = 425e098bc96SEvan Quan psl->ucNumEntries; 426e098bc96SEvan Quan } 427e098bc96SEvan Quan } 428e098bc96SEvan Quan 429e098bc96SEvan Quan /* cac data */ 430e098bc96SEvan Quan if (le16_to_cpu(power_info->pplib.usTableSize) >= 431e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { 432e098bc96SEvan Quan adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); 433e098bc96SEvan Quan 
adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); 434e098bc96SEvan Quan adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; 435e098bc96SEvan Quan adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); 436e098bc96SEvan Quan if (adev->pm.dpm.tdp_od_limit) 437e098bc96SEvan Quan adev->pm.dpm.power_control = true; 438e098bc96SEvan Quan else 439e098bc96SEvan Quan adev->pm.dpm.power_control = false; 440e098bc96SEvan Quan adev->pm.dpm.tdp_adjustment = 0; 441e098bc96SEvan Quan adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); 442e098bc96SEvan Quan adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); 443e098bc96SEvan Quan adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); 444e098bc96SEvan Quan if (power_info->pplib5.usCACLeakageTableOffset) { 445e098bc96SEvan Quan ATOM_PPLIB_CAC_Leakage_Table *cac_table = 446e098bc96SEvan Quan (ATOM_PPLIB_CAC_Leakage_Table *) 447e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 448e098bc96SEvan Quan le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); 449e098bc96SEvan Quan ATOM_PPLIB_CAC_Leakage_Record *entry; 450e098bc96SEvan Quan u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); 451e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); 452e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { 453e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 454e098bc96SEvan Quan return -ENOMEM; 455e098bc96SEvan Quan } 456e098bc96SEvan Quan entry = &cac_table->entries[0]; 457e098bc96SEvan Quan for (i = 0; i < cac_table->ucNumEntries; i++) { 458e098bc96SEvan Quan if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 459e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = 460e098bc96SEvan Quan le16_to_cpu(entry->usVddc1); 461e098bc96SEvan Quan 
adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = 462e098bc96SEvan Quan le16_to_cpu(entry->usVddc2); 463e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = 464e098bc96SEvan Quan le16_to_cpu(entry->usVddc3); 465e098bc96SEvan Quan } else { 466e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = 467e098bc96SEvan Quan le16_to_cpu(entry->usVddc); 468e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = 469e098bc96SEvan Quan le32_to_cpu(entry->ulLeakageValue); 470e098bc96SEvan Quan } 471e098bc96SEvan Quan entry = (ATOM_PPLIB_CAC_Leakage_Record *) 472e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); 473e098bc96SEvan Quan } 474e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; 475e098bc96SEvan Quan } 476e098bc96SEvan Quan } 477e098bc96SEvan Quan 478e098bc96SEvan Quan /* ext tables */ 479e098bc96SEvan Quan if (le16_to_cpu(power_info->pplib.usTableSize) >= 480e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 481e098bc96SEvan Quan ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) 482e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 483e098bc96SEvan Quan le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); 484e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && 485e098bc96SEvan Quan ext_hdr->usVCETableOffset) { 486e098bc96SEvan Quan VCEClockInfoArray *array = (VCEClockInfoArray *) 487e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 488e098bc96SEvan Quan le16_to_cpu(ext_hdr->usVCETableOffset) + 1); 489e098bc96SEvan Quan ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = 490e098bc96SEvan Quan (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) 491e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 492e098bc96SEvan Quan le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 493e098bc96SEvan Quan 1 + 
array->ucNumEntries * sizeof(VCEClockInfo)); 494e098bc96SEvan Quan ATOM_PPLIB_VCE_State_Table *states = 495e098bc96SEvan Quan (ATOM_PPLIB_VCE_State_Table *) 496e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 497e098bc96SEvan Quan le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 498e098bc96SEvan Quan 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + 499e098bc96SEvan Quan 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); 500e098bc96SEvan Quan ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; 501e098bc96SEvan Quan ATOM_PPLIB_VCE_State_Record *state_entry; 502e098bc96SEvan Quan VCEClockInfo *vce_clk; 503e098bc96SEvan Quan u32 size = limits->numEntries * 504e098bc96SEvan Quan sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); 505e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = 506e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 507e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { 508e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 509e098bc96SEvan Quan return -ENOMEM; 510e098bc96SEvan Quan } 511e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = 512e098bc96SEvan Quan limits->numEntries; 513e098bc96SEvan Quan entry = &limits->entries[0]; 514e098bc96SEvan Quan state_entry = &states->entries[0]; 515e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 516e098bc96SEvan Quan vce_clk = (VCEClockInfo *) 517e098bc96SEvan Quan ((u8 *)&array->entries[0] + 518e098bc96SEvan Quan (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 519e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = 520e098bc96SEvan Quan le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 521e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = 522e098bc96SEvan Quan le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 
523e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = 524e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 525e098bc96SEvan Quan entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) 526e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); 527e098bc96SEvan Quan } 528e098bc96SEvan Quan adev->pm.dpm.num_of_vce_states = 529e098bc96SEvan Quan states->numEntries > AMD_MAX_VCE_LEVELS ? 530e098bc96SEvan Quan AMD_MAX_VCE_LEVELS : states->numEntries; 531e098bc96SEvan Quan for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 532e098bc96SEvan Quan vce_clk = (VCEClockInfo *) 533e098bc96SEvan Quan ((u8 *)&array->entries[0] + 534e098bc96SEvan Quan (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 535e098bc96SEvan Quan adev->pm.dpm.vce_states[i].evclk = 536e098bc96SEvan Quan le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 537e098bc96SEvan Quan adev->pm.dpm.vce_states[i].ecclk = 538e098bc96SEvan Quan le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 539e098bc96SEvan Quan adev->pm.dpm.vce_states[i].clk_idx = 540e098bc96SEvan Quan state_entry->ucClockInfoIndex & 0x3f; 541e098bc96SEvan Quan adev->pm.dpm.vce_states[i].pstate = 542e098bc96SEvan Quan (state_entry->ucClockInfoIndex & 0xc0) >> 6; 543e098bc96SEvan Quan state_entry = (ATOM_PPLIB_VCE_State_Record *) 544e098bc96SEvan Quan ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); 545e098bc96SEvan Quan } 546e098bc96SEvan Quan } 547e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && 548e098bc96SEvan Quan ext_hdr->usUVDTableOffset) { 549e098bc96SEvan Quan UVDClockInfoArray *array = (UVDClockInfoArray *) 550e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 551e098bc96SEvan Quan le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); 552e098bc96SEvan Quan ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = 553e098bc96SEvan Quan 
(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) 554e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 555e098bc96SEvan Quan le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + 556e098bc96SEvan Quan 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); 557e098bc96SEvan Quan ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; 558e098bc96SEvan Quan u32 size = limits->numEntries * 559e098bc96SEvan Quan sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); 560e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = 561e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 562e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { 563e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 564e098bc96SEvan Quan return -ENOMEM; 565e098bc96SEvan Quan } 566e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = 567e098bc96SEvan Quan limits->numEntries; 568e098bc96SEvan Quan entry = &limits->entries[0]; 569e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 570e098bc96SEvan Quan UVDClockInfo *uvd_clk = (UVDClockInfo *) 571e098bc96SEvan Quan ((u8 *)&array->entries[0] + 572e098bc96SEvan Quan (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); 573e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = 574e098bc96SEvan Quan le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); 575e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 576e098bc96SEvan Quan le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 577e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 578e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 579e098bc96SEvan Quan entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 580e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 581e098bc96SEvan Quan } 582e098bc96SEvan Quan } 583e098bc96SEvan 
Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && 584e098bc96SEvan Quan ext_hdr->usSAMUTableOffset) { 585e098bc96SEvan Quan ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = 586e098bc96SEvan Quan (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) 587e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 588e098bc96SEvan Quan le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); 589e098bc96SEvan Quan ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; 590e098bc96SEvan Quan u32 size = limits->numEntries * 591e098bc96SEvan Quan sizeof(struct amdgpu_clock_voltage_dependency_entry); 592e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = 593e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 594e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { 595e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 596e098bc96SEvan Quan return -ENOMEM; 597e098bc96SEvan Quan } 598e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = 599e098bc96SEvan Quan limits->numEntries; 600e098bc96SEvan Quan entry = &limits->entries[0]; 601e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 602e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = 603e098bc96SEvan Quan le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); 604e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = 605e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 606e098bc96SEvan Quan entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) 607e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); 608e098bc96SEvan Quan } 609e098bc96SEvan Quan } 610e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && 611e098bc96SEvan Quan ext_hdr->usPPMTableOffset) { 612e098bc96SEvan Quan ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) 613e098bc96SEvan 
Quan (mode_info->atom_context->bios + data_offset + 614e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPPMTableOffset)); 615e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table = 616e098bc96SEvan Quan kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); 617e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.ppm_table) { 618e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 619e098bc96SEvan Quan return -ENOMEM; 620e098bc96SEvan Quan } 621e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; 622e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = 623e098bc96SEvan Quan le16_to_cpu(ppm->usCpuCoreNumber); 624e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->platform_tdp = 625e098bc96SEvan Quan le32_to_cpu(ppm->ulPlatformTDP); 626e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = 627e098bc96SEvan Quan le32_to_cpu(ppm->ulSmallACPlatformTDP); 628e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->platform_tdc = 629e098bc96SEvan Quan le32_to_cpu(ppm->ulPlatformTDC); 630e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = 631e098bc96SEvan Quan le32_to_cpu(ppm->ulSmallACPlatformTDC); 632e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->apu_tdp = 633e098bc96SEvan Quan le32_to_cpu(ppm->ulApuTDP); 634e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = 635e098bc96SEvan Quan le32_to_cpu(ppm->ulDGpuTDP); 636e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = 637e098bc96SEvan Quan le32_to_cpu(ppm->ulDGpuUlvPower); 638e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->tj_max = 639e098bc96SEvan Quan le32_to_cpu(ppm->ulTjmax); 640e098bc96SEvan Quan } 641e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && 642e098bc96SEvan Quan ext_hdr->usACPTableOffset) { 643e098bc96SEvan Quan ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = 644e098bc96SEvan Quan (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) 645e098bc96SEvan Quan 
(mode_info->atom_context->bios + data_offset + 646e098bc96SEvan Quan le16_to_cpu(ext_hdr->usACPTableOffset) + 1); 647e098bc96SEvan Quan ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; 648e098bc96SEvan Quan u32 size = limits->numEntries * 649e098bc96SEvan Quan sizeof(struct amdgpu_clock_voltage_dependency_entry); 650e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = 651e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 652e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { 653e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 654e098bc96SEvan Quan return -ENOMEM; 655e098bc96SEvan Quan } 656e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = 657e098bc96SEvan Quan limits->numEntries; 658e098bc96SEvan Quan entry = &limits->entries[0]; 659e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 660e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = 661e098bc96SEvan Quan le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); 662e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = 663e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 664e098bc96SEvan Quan entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) 665e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); 666e098bc96SEvan Quan } 667e098bc96SEvan Quan } 668e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && 669e098bc96SEvan Quan ext_hdr->usPowerTuneTableOffset) { 670e098bc96SEvan Quan u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + 671e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 672e098bc96SEvan Quan ATOM_PowerTune_Table *pt; 673e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table = 674e098bc96SEvan Quan kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); 675e098bc96SEvan Quan if 
(!adev->pm.dpm.dyn_state.cac_tdp_table) { 676e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 677e098bc96SEvan Quan return -ENOMEM; 678e098bc96SEvan Quan } 679e098bc96SEvan Quan if (rev > 0) { 680e098bc96SEvan Quan ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) 681e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 682e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 683e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 684e098bc96SEvan Quan ppt->usMaximumPowerDeliveryLimit; 685e098bc96SEvan Quan pt = &ppt->power_tune_table; 686e098bc96SEvan Quan } else { 687e098bc96SEvan Quan ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) 688e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 689e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 690e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; 691e098bc96SEvan Quan pt = &ppt->power_tune_table; 692e098bc96SEvan Quan } 693e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); 694e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = 695e098bc96SEvan Quan le16_to_cpu(pt->usConfigurableTDP); 696e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); 697e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = 698e098bc96SEvan Quan le16_to_cpu(pt->usBatteryPowerLimit); 699e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = 700e098bc96SEvan Quan le16_to_cpu(pt->usSmallPowerLimit); 701e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = 702e098bc96SEvan Quan le16_to_cpu(pt->usLowCACLeakage); 703e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = 704e098bc96SEvan Quan le16_to_cpu(pt->usHighCACLeakage); 705e098bc96SEvan Quan } 706e098bc96SEvan Quan if 
((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
			ext_hdr->usSclkVddgfxTableOffset) {
			/* V8+ header: parse the sclk -> vddgfx dependency table */
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Free every dynamic-state table allocated while parsing the extended
 * power table.  Also used as the error-path cleanup during parsing;
 * kfree(NULL) is a no-op, so tables that were never allocated are safe.
 */
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

/*
 * Human-readable controller names, indexed by the controller type byte
 * from the PowerPlay table (ATOM_PP_THERMALCONTROLLER_* values).
 */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

/*
 * Read the thermal-controller descriptor out of the ATOM PowerPlay table:
 * record fan capabilities (presence, tach pulses per revolution, min/max
 * RPM) and the internal thermal controller type.  For external I2C
 * controllers, look up the I2C bus and register an i2c client for the chip.
 */
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		/* map the BIOS controller type onto the driver's thermal type */
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			/* external chip we know by name: register an i2c client for it */
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				/* ucI2cAddress is an 8-bit address; i2c core wants 7-bit */
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

/*
 * Pick the PCIe gen to use: honor an explicit asic_gen request, otherwise
 * fall back to default_gen clamped by the platform's link-speed support
 * mask (sys_mask), degrading toward GEN1.
 */
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;	/* not reachable; keeps some compilers quiet */
}

/* Return the idx'th VCE clock state, or NULL if idx is out of range. */
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

/*
 * Get the sclk (gfx clock) limit; low selects the minimum, otherwise the
 * maximum.  The SW-SMU path scales the SMU value by 100 to match the
 * powerplay get_sclk() units (presumably 10 kHz -- NOTE(review): confirm
 * against callers).  Returns 0 if the SMU query fails.
 */
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;

	} else {
		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
	}
}

/* Same as amdgpu_dpm_get_sclk() but for the memory clock (UCLK). */
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;

	} else {
		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
	}
}

/*
 * Gate/ungate power for an IP block via the SMU.  Routes to the SW-SMU
 * interface when present, otherwise to the powerplay callback.  Only the
 * UVD/VCE path takes adev->pm.mutex -- see the embedded comment.
 * Unsupported block types are silently ignored (returns 0).
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	bool swsmu = is_support_sw_smu(adev);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (swsmu) {
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		} else if (adev->powerplay.pp_funcs &&
			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 * NFO: task ocltst:2028 blocked for more than 120 seconds.
			 * Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 * cltst           D    0  2028   2026 0x00000000
			 * all Trace:
			 * __schedule+0x2c0/0x870
			 * schedule+0x2c/0x70
			 * schedule_preempt_disabled+0xe/0x10
			 * __mutex_lock.isra.9+0x26d/0x4e0
			 * __mutex_lock_slowpath+0x13/0x20
			 * ? __mutex_lock_slowpath+0x13/0x20
			 * mutex_lock+0x2f/0x40
			 * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else if (adev->powerplay.pp_funcs &&
			 adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		/* JPEG gating is only available through the SW-SMU path */
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Enter BACO (Bus Active, Chip Off).  Returns -ENOENT if neither the
 * SW-SMU nor the powerplay backend provides the operation.
 */
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	}

	return ret;
}

/* Leave BACO; counterpart of amdgpu_dpm_baco_enter(). */
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_exit(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
	}

	return ret;
}

/*
 * Forward an MP1 state change to the SMU.  Silently succeeds (ret = 0)
 * when the backend has no set_mp1_state hook.
 */
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_set_mp1_state(&adev->smu, mp1_state);
	} else if (adev->powerplay.pp_funcs &&
		   adev->powerplay.pp_funcs->set_mp1_state) {
		ret = adev->powerplay.pp_funcs->set_mp1_state(
			adev->powerplay.pp_handle,
			mp1_state);
	}

	return ret;
}

/*
 * Report whether the ASIC supports BACO.  Any backend error while
 * querying capability is treated as "not supported".
 */
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	bool baco_cap;

	if (is_support_sw_smu(adev)) {
		return smu_baco_is_support(smu);
	} else {
		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
			return false;

		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
			return false;

		return baco_cap ? true : false;
	}
}

/* Trigger a mode-2 ASIC reset; -ENOENT if the backend lacks the hook. */
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev)) {
		return smu_mode2_reset(smu);
	} else {
		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
			return -ENOENT;

		return pp_funcs->asic_reset_mode_2(pp_handle);
	}
}

/*
 * Reset the GPU by cycling it through BACO (enter then exit).  Bails out
 * with the backend's error code if either transition fails.
 */
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	dev_info(adev->dev, "GPU BACO reset\n");

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;

		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!pp_funcs
		    || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
		if (ret)
			return ret;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/* Mode-1 reset capability is only exposed through the SW-SMU path. */
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset_is_support(smu);

	return false;
}

/* Trigger a mode-1 reset; -EOPNOTSUPP without SW-SMU support. */
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset(smu);

	return -EOPNOTSUPP;
}

/* Enable (en) or disable a SMC power profile for the given workload type. */
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_switch_power_profile(&adev->smu, type, en);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->switch_power_profile)
		ret = adev->powerplay.pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);

	return ret;
}

/* Set the XGMI link pstate; no-op (ret = 0) when unsupported. */
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->set_xgmi_pstate)
		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);

	return ret;
}

/* Set the data-fabric C-state; no-op (ret = 0) when unsupported. */
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		ret = smu_set_df_cstate(smu, cstate);
	else if (pp_funcs &&
		 pp_funcs->set_df_cstate)
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);

	return ret;
}

/* Allow/disallow XGMI power down; SW-SMU only, otherwise a no-op. */
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_allow_xgmi_power_down(smu, en);

	return 0;
}

/* Enable the multi-GPU fan boost feature; no-op when unsupported. */
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_enable_mgpu_fan_boost(smu);
	else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);

	return ret;
}

/*
 * Send a clockgating message (msg_id) to the SMU via the powerplay
 * callback; no-op (ret = 0) when the hook is absent.
 */
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct
amdgpu_device *adev, 1257e098bc96SEvan Quan bool acquire) 1258e098bc96SEvan Quan { 1259e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle; 1260e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = 1261e098bc96SEvan Quan adev->powerplay.pp_funcs; 1262e098bc96SEvan Quan int ret = -EOPNOTSUPP; 1263e098bc96SEvan Quan 1264e098bc96SEvan Quan if (pp_funcs && pp_funcs->smu_i2c_bus_access) 1265e098bc96SEvan Quan ret = pp_funcs->smu_i2c_bus_access(pp_handle, 1266e098bc96SEvan Quan acquire); 1267e098bc96SEvan Quan 1268e098bc96SEvan Quan return ret; 1269e098bc96SEvan Quan } 1270e098bc96SEvan Quan 1271e098bc96SEvan Quan void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 1272e098bc96SEvan Quan { 1273e098bc96SEvan Quan if (adev->pm.dpm_enabled) { 1274e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1275e098bc96SEvan Quan if (power_supply_is_system_supplied() > 0) 1276e098bc96SEvan Quan adev->pm.ac_power = true; 1277e098bc96SEvan Quan else 1278e098bc96SEvan Quan adev->pm.ac_power = false; 1279e098bc96SEvan Quan if (adev->powerplay.pp_funcs && 1280e098bc96SEvan Quan adev->powerplay.pp_funcs->enable_bapm) 1281e098bc96SEvan Quan amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 1282e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1283e098bc96SEvan Quan 1284e098bc96SEvan Quan if (is_support_sw_smu(adev)) 1285e098bc96SEvan Quan smu_set_ac_dc(&adev->smu); 1286e098bc96SEvan Quan } 1287e098bc96SEvan Quan } 1288e098bc96SEvan Quan 1289e098bc96SEvan Quan int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 1290e098bc96SEvan Quan void *data, uint32_t *size) 1291e098bc96SEvan Quan { 1292e098bc96SEvan Quan int ret = 0; 1293e098bc96SEvan Quan 1294e098bc96SEvan Quan if (!data || !size) 1295e098bc96SEvan Quan return -EINVAL; 1296e098bc96SEvan Quan 1297e098bc96SEvan Quan if (is_support_sw_smu(adev)) 1298e098bc96SEvan Quan ret = smu_read_sensor(&adev->smu, sensor, data, size); 1299e098bc96SEvan Quan else { 1300e098bc96SEvan Quan if 
(adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) 1301e098bc96SEvan Quan ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, 1302e098bc96SEvan Quan sensor, data, size); 1303e098bc96SEvan Quan else 1304e098bc96SEvan Quan ret = -EINVAL; 1305e098bc96SEvan Quan } 1306e098bc96SEvan Quan 1307e098bc96SEvan Quan return ret; 1308e098bc96SEvan Quan } 1309e098bc96SEvan Quan 1310e098bc96SEvan Quan void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 1311e098bc96SEvan Quan { 1312e098bc96SEvan Quan struct amdgpu_device *adev = 1313e098bc96SEvan Quan container_of(work, struct amdgpu_device, 1314e098bc96SEvan Quan pm.dpm.thermal.work); 1315e098bc96SEvan Quan /* switch to the thermal state */ 1316e098bc96SEvan Quan enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1317e098bc96SEvan Quan int temp, size = sizeof(temp); 1318e098bc96SEvan Quan 1319e098bc96SEvan Quan if (!adev->pm.dpm_enabled) 1320e098bc96SEvan Quan return; 1321e098bc96SEvan Quan 1322e098bc96SEvan Quan if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 1323e098bc96SEvan Quan (void *)&temp, &size)) { 1324e098bc96SEvan Quan if (temp < adev->pm.dpm.thermal.min_temp) 1325e098bc96SEvan Quan /* switch back the user state */ 1326e098bc96SEvan Quan dpm_state = adev->pm.dpm.user_state; 1327e098bc96SEvan Quan } else { 1328e098bc96SEvan Quan if (adev->pm.dpm.thermal.high_to_low) 1329e098bc96SEvan Quan /* switch back the user state */ 1330e098bc96SEvan Quan dpm_state = adev->pm.dpm.user_state; 1331e098bc96SEvan Quan } 1332e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1333e098bc96SEvan Quan if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 1334e098bc96SEvan Quan adev->pm.dpm.thermal_active = true; 1335e098bc96SEvan Quan else 1336e098bc96SEvan Quan adev->pm.dpm.thermal_active = false; 1337e098bc96SEvan Quan adev->pm.dpm.state = dpm_state; 1338e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1339e098bc96SEvan Quan 1340e098bc96SEvan Quan 
amdgpu_pm_compute_clocks(adev);	/* continuation: tail of amdgpu_dpm_thermal_work_handler() */
}

/* Pick the best matching legacy power state for @dpm_state, honouring
 * single-display-only states and falling back through progressively
 * more generic states.  Returns NULL if nothing matches at all.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	/* NOTE(review): pp_funcs is dereferenced without a NULL check here;
	 * presumably this path only runs on legacy dpm parts where pp_funcs
	 * is always populated — confirm against callers.
	 */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			/* MVC lives in the second classification word */
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

/* Apply pm.dpm.state (caller holds pm.mutex): pick a power state,
 * program it via the legacy pre/set/post hooks, and re-apply any forced
 * performance level (LOW while thermally throttled, preserving the
 * user's chosen level for later).
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* amdgpu_dpm == 1 means verbose state-switch logging was requested */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* skip the hardware reprogram if the new state equals the current one */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

/* Recompute display bandwidth and push the new display configuration to
 * whichever DPM backend is in use.  Waits for all active rings to drain
 * first so the reclock happens with the GPU quiescent.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
				true);
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
						adev->powerplay.pp_handle,
						&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}

/* Power UVD up (enable == true) or down.  SI parts use the legacy state
 * machine plus a reclock; everything else goes through SMU powergating.
 * On Stoney the NB low-memory pstate is additionally disabled while
 * decoding 4K content.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		/* powergating is inverted: gate (true) when disabling */
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
		    adev->uvd.decode_image_width >= WIDTH_4K) {
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}

/* Power VCE up or down; SI parts adjust the legacy state machine and
 * reclock, the rest use SMU powergating.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		if (ret)
			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);
	}
}

/* Dump every legacy power state to the kernel log (debug aid). */
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);

}

/* Power the JPEG block up or down via SMU powergating. */
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

/* Ask the powerplay backend to load the SMU firmware; on success report
 * the running version through *smu_version.  Returns 0 when the backend
 * has no load_firmware hook (nothing to do).
 */
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}
		*smu_version = adev->pm.fw_version;
	}
	return 0;
}