1e098bc96SEvan Quan /* 2e098bc96SEvan Quan * Copyright 2011 Advanced Micro Devices, Inc. 3e098bc96SEvan Quan * 4e098bc96SEvan Quan * Permission is hereby granted, free of charge, to any person obtaining a 5e098bc96SEvan Quan * copy of this software and associated documentation files (the "Software"), 6e098bc96SEvan Quan * to deal in the Software without restriction, including without limitation 7e098bc96SEvan Quan * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8e098bc96SEvan Quan * and/or sell copies of the Software, and to permit persons to whom the 9e098bc96SEvan Quan * Software is furnished to do so, subject to the following conditions: 10e098bc96SEvan Quan * 11e098bc96SEvan Quan * The above copyright notice and this permission notice shall be included in 12e098bc96SEvan Quan * all copies or substantial portions of the Software. 13e098bc96SEvan Quan * 14e098bc96SEvan Quan * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15e098bc96SEvan Quan * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16e098bc96SEvan Quan * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17e098bc96SEvan Quan * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18e098bc96SEvan Quan * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19e098bc96SEvan Quan * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20e098bc96SEvan Quan * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

/*
 * Convenience wrappers around the powerplay callback table
 * ((adev)->powerplay.pp_funcs).  Each macro forwards straight to the
 * corresponding callback, passing the opaque powerplay handle plus any
 * extra arguments.
 *
 * NOTE(review): no NULL check on the callback is performed here —
 * presumably callers only use these when the callback is known to be
 * populated; confirm at the call sites.
 */
#define amdgpu_dpm_pre_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_post_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_display_configuration_changed(adev) \
		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))

#define amdgpu_dpm_print_power_state(adev, ps) \
		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))

#define amdgpu_dpm_vblank_too_short(adev) \
		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))

58e098bc96SEvan Quan void amdgpu_dpm_print_class_info(u32 class, u32 class2) 59e098bc96SEvan Quan { 60e098bc96SEvan Quan const char *s; 61e098bc96SEvan Quan 62e098bc96SEvan Quan switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 63e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_NONE: 64e098bc96SEvan Quan default: 65e098bc96SEvan Quan s = "none"; 66e098bc96SEvan Quan break; 67e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 68e098bc96SEvan Quan s = "battery"; 69e098bc96SEvan Quan break; 70e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: 71e098bc96SEvan Quan s = "balanced"; 72e098bc96SEvan Quan break; 73e098bc96SEvan Quan case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 74e098bc96SEvan Quan s = "performance"; 75e098bc96SEvan Quan break; 76e098bc96SEvan Quan } 77e098bc96SEvan Quan printk("\tui class: %s\n", s); 78e098bc96SEvan Quan printk("\tinternal class:"); 79e098bc96SEvan Quan if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && 80e098bc96SEvan Quan (class2 == 0)) 81e098bc96SEvan Quan pr_cont(" none"); 82e098bc96SEvan Quan else { 83e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) 84e098bc96SEvan Quan pr_cont(" boot"); 85e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 86e098bc96SEvan Quan pr_cont(" thermal"); 87e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) 88e098bc96SEvan Quan pr_cont(" limited_pwr"); 89e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_REST) 90e098bc96SEvan Quan pr_cont(" rest"); 91e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) 92e098bc96SEvan Quan pr_cont(" forced"); 93e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 94e098bc96SEvan Quan pr_cont(" 3d_perf"); 95e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) 96e098bc96SEvan Quan pr_cont(" ovrdrv"); 97e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 98e098bc96SEvan Quan pr_cont(" uvd"); 99e098bc96SEvan Quan 
if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) 100e098bc96SEvan Quan pr_cont(" 3d_low"); 101e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) 102e098bc96SEvan Quan pr_cont(" acpi"); 103e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 104e098bc96SEvan Quan pr_cont(" uvd_hd2"); 105e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 106e098bc96SEvan Quan pr_cont(" uvd_hd"); 107e098bc96SEvan Quan if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 108e098bc96SEvan Quan pr_cont(" uvd_sd"); 109e098bc96SEvan Quan if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) 110e098bc96SEvan Quan pr_cont(" limited_pwr2"); 111e098bc96SEvan Quan if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 112e098bc96SEvan Quan pr_cont(" ulv"); 113e098bc96SEvan Quan if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 114e098bc96SEvan Quan pr_cont(" uvd_mvc"); 115e098bc96SEvan Quan } 116e098bc96SEvan Quan pr_cont("\n"); 117e098bc96SEvan Quan } 118e098bc96SEvan Quan 119e098bc96SEvan Quan void amdgpu_dpm_print_cap_info(u32 caps) 120e098bc96SEvan Quan { 121e098bc96SEvan Quan printk("\tcaps:"); 122e098bc96SEvan Quan if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) 123e098bc96SEvan Quan pr_cont(" single_disp"); 124e098bc96SEvan Quan if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) 125e098bc96SEvan Quan pr_cont(" video"); 126e098bc96SEvan Quan if (caps & ATOM_PPLIB_DISALLOW_ON_DC) 127e098bc96SEvan Quan pr_cont(" no_dc"); 128e098bc96SEvan Quan pr_cont("\n"); 129e098bc96SEvan Quan } 130e098bc96SEvan Quan 131e098bc96SEvan Quan void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, 132e098bc96SEvan Quan struct amdgpu_ps *rps) 133e098bc96SEvan Quan { 134e098bc96SEvan Quan printk("\tstatus:"); 135e098bc96SEvan Quan if (rps == adev->pm.dpm.current_ps) 136e098bc96SEvan Quan pr_cont(" c"); 137e098bc96SEvan Quan if (rps == adev->pm.dpm.requested_ps) 138e098bc96SEvan Quan pr_cont(" r"); 139e098bc96SEvan Quan if (rps == adev->pm.dpm.boot_ps) 140e098bc96SEvan Quan pr_cont(" b"); 
141e098bc96SEvan Quan pr_cont("\n"); 142e098bc96SEvan Quan } 143e098bc96SEvan Quan 144*d4481576SEvan Quan static void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 145e098bc96SEvan Quan { 1464a580877SLuben Tuikov struct drm_device *ddev = adev_to_drm(adev); 147e098bc96SEvan Quan struct drm_crtc *crtc; 148e098bc96SEvan Quan struct amdgpu_crtc *amdgpu_crtc; 149e098bc96SEvan Quan 150e098bc96SEvan Quan adev->pm.dpm.new_active_crtcs = 0; 151e098bc96SEvan Quan adev->pm.dpm.new_active_crtc_count = 0; 152e098bc96SEvan Quan if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 153e098bc96SEvan Quan list_for_each_entry(crtc, 154e098bc96SEvan Quan &ddev->mode_config.crtc_list, head) { 155e098bc96SEvan Quan amdgpu_crtc = to_amdgpu_crtc(crtc); 156e098bc96SEvan Quan if (amdgpu_crtc->enabled) { 157e098bc96SEvan Quan adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 158e098bc96SEvan Quan adev->pm.dpm.new_active_crtc_count++; 159e098bc96SEvan Quan } 160e098bc96SEvan Quan } 161e098bc96SEvan Quan } 162e098bc96SEvan Quan } 163e098bc96SEvan Quan 164e098bc96SEvan Quan 165e098bc96SEvan Quan u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 166e098bc96SEvan Quan { 1674a580877SLuben Tuikov struct drm_device *dev = adev_to_drm(adev); 168e098bc96SEvan Quan struct drm_crtc *crtc; 169e098bc96SEvan Quan struct amdgpu_crtc *amdgpu_crtc; 170e098bc96SEvan Quan u32 vblank_in_pixels; 171e098bc96SEvan Quan u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 172e098bc96SEvan Quan 173e098bc96SEvan Quan if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 174e098bc96SEvan Quan list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 175e098bc96SEvan Quan amdgpu_crtc = to_amdgpu_crtc(crtc); 176e098bc96SEvan Quan if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 177e098bc96SEvan Quan vblank_in_pixels = 178e098bc96SEvan Quan amdgpu_crtc->hw_mode.crtc_htotal * 
179e098bc96SEvan Quan (amdgpu_crtc->hw_mode.crtc_vblank_end - 180e098bc96SEvan Quan amdgpu_crtc->hw_mode.crtc_vdisplay + 181e098bc96SEvan Quan (amdgpu_crtc->v_border * 2)); 182e098bc96SEvan Quan 183e098bc96SEvan Quan vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 184e098bc96SEvan Quan break; 185e098bc96SEvan Quan } 186e098bc96SEvan Quan } 187e098bc96SEvan Quan } 188e098bc96SEvan Quan 189e098bc96SEvan Quan return vblank_time_us; 190e098bc96SEvan Quan } 191e098bc96SEvan Quan 192*d4481576SEvan Quan static u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 193e098bc96SEvan Quan { 1944a580877SLuben Tuikov struct drm_device *dev = adev_to_drm(adev); 195e098bc96SEvan Quan struct drm_crtc *crtc; 196e098bc96SEvan Quan struct amdgpu_crtc *amdgpu_crtc; 197e098bc96SEvan Quan u32 vrefresh = 0; 198e098bc96SEvan Quan 199e098bc96SEvan Quan if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 200e098bc96SEvan Quan list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 201e098bc96SEvan Quan amdgpu_crtc = to_amdgpu_crtc(crtc); 202e098bc96SEvan Quan if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 203e098bc96SEvan Quan vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 204e098bc96SEvan Quan break; 205e098bc96SEvan Quan } 206e098bc96SEvan Quan } 207e098bc96SEvan Quan } 208e098bc96SEvan Quan 209e098bc96SEvan Quan return vrefresh; 210e098bc96SEvan Quan } 211e098bc96SEvan Quan 212e098bc96SEvan Quan bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) 213e098bc96SEvan Quan { 214e098bc96SEvan Quan switch (sensor) { 215e098bc96SEvan Quan case THERMAL_TYPE_RV6XX: 216e098bc96SEvan Quan case THERMAL_TYPE_RV770: 217e098bc96SEvan Quan case THERMAL_TYPE_EVERGREEN: 218e098bc96SEvan Quan case THERMAL_TYPE_SUMO: 219e098bc96SEvan Quan case THERMAL_TYPE_NI: 220e098bc96SEvan Quan case THERMAL_TYPE_SI: 221e098bc96SEvan Quan case THERMAL_TYPE_CI: 222e098bc96SEvan Quan case THERMAL_TYPE_KV: 
223e098bc96SEvan Quan return true; 224e098bc96SEvan Quan case THERMAL_TYPE_ADT7473_WITH_INTERNAL: 225e098bc96SEvan Quan case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 226e098bc96SEvan Quan return false; /* need special handling */ 227e098bc96SEvan Quan case THERMAL_TYPE_NONE: 228e098bc96SEvan Quan case THERMAL_TYPE_EXTERNAL: 229e098bc96SEvan Quan case THERMAL_TYPE_EXTERNAL_GPIO: 230e098bc96SEvan Quan default: 231e098bc96SEvan Quan return false; 232e098bc96SEvan Quan } 233e098bc96SEvan Quan } 234e098bc96SEvan Quan 235e098bc96SEvan Quan union power_info { 236e098bc96SEvan Quan struct _ATOM_POWERPLAY_INFO info; 237e098bc96SEvan Quan struct _ATOM_POWERPLAY_INFO_V2 info_2; 238e098bc96SEvan Quan struct _ATOM_POWERPLAY_INFO_V3 info_3; 239e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 240e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 241e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 242e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; 243e098bc96SEvan Quan struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; 244e098bc96SEvan Quan }; 245e098bc96SEvan Quan 246e098bc96SEvan Quan union fan_info { 247e098bc96SEvan Quan struct _ATOM_PPLIB_FANTABLE fan; 248e098bc96SEvan Quan struct _ATOM_PPLIB_FANTABLE2 fan2; 249e098bc96SEvan Quan struct _ATOM_PPLIB_FANTABLE3 fan3; 250e098bc96SEvan Quan }; 251e098bc96SEvan Quan 252e098bc96SEvan Quan static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, 253e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) 254e098bc96SEvan Quan { 255e098bc96SEvan Quan u32 size = atom_table->ucNumEntries * 256e098bc96SEvan Quan sizeof(struct amdgpu_clock_voltage_dependency_entry); 257e098bc96SEvan Quan int i; 258e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; 259e098bc96SEvan Quan 260e098bc96SEvan Quan amdgpu_table->entries = kzalloc(size, GFP_KERNEL); 261e098bc96SEvan Quan if (!amdgpu_table->entries) 262e098bc96SEvan Quan return 
-ENOMEM; 263e098bc96SEvan Quan 264e098bc96SEvan Quan entry = &atom_table->entries[0]; 265e098bc96SEvan Quan for (i = 0; i < atom_table->ucNumEntries; i++) { 266e098bc96SEvan Quan amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | 267e098bc96SEvan Quan (entry->ucClockHigh << 16); 268e098bc96SEvan Quan amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); 269e098bc96SEvan Quan entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) 270e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); 271e098bc96SEvan Quan } 272e098bc96SEvan Quan amdgpu_table->count = atom_table->ucNumEntries; 273e098bc96SEvan Quan 274e098bc96SEvan Quan return 0; 275e098bc96SEvan Quan } 276e098bc96SEvan Quan 277e098bc96SEvan Quan int amdgpu_get_platform_caps(struct amdgpu_device *adev) 278e098bc96SEvan Quan { 279e098bc96SEvan Quan struct amdgpu_mode_info *mode_info = &adev->mode_info; 280e098bc96SEvan Quan union power_info *power_info; 281e098bc96SEvan Quan int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 282e098bc96SEvan Quan u16 data_offset; 283e098bc96SEvan Quan u8 frev, crev; 284e098bc96SEvan Quan 285e098bc96SEvan Quan if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 286e098bc96SEvan Quan &frev, &crev, &data_offset)) 287e098bc96SEvan Quan return -EINVAL; 288e098bc96SEvan Quan power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 289e098bc96SEvan Quan 290e098bc96SEvan Quan adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); 291e098bc96SEvan Quan adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 292e098bc96SEvan Quan adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 293e098bc96SEvan Quan 294e098bc96SEvan Quan return 0; 295e098bc96SEvan Quan } 296e098bc96SEvan Quan 297e098bc96SEvan Quan /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ 298e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 
299e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 300e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 301e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 302e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 303e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 304e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 305e098bc96SEvan Quan #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 306e098bc96SEvan Quan 307e098bc96SEvan Quan int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) 308e098bc96SEvan Quan { 309e098bc96SEvan Quan struct amdgpu_mode_info *mode_info = &adev->mode_info; 310e098bc96SEvan Quan union power_info *power_info; 311e098bc96SEvan Quan union fan_info *fan_info; 312e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; 313e098bc96SEvan Quan int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 314e098bc96SEvan Quan u16 data_offset; 315e098bc96SEvan Quan u8 frev, crev; 316e098bc96SEvan Quan int ret, i; 317e098bc96SEvan Quan 318e098bc96SEvan Quan if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 319e098bc96SEvan Quan &frev, &crev, &data_offset)) 320e098bc96SEvan Quan return -EINVAL; 321e098bc96SEvan Quan power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 322e098bc96SEvan Quan 323e098bc96SEvan Quan /* fan table */ 324e098bc96SEvan Quan if (le16_to_cpu(power_info->pplib.usTableSize) >= 325e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 326e098bc96SEvan Quan if (power_info->pplib3.usFanTableOffset) { 327e098bc96SEvan Quan fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + 328e098bc96SEvan Quan le16_to_cpu(power_info->pplib3.usFanTableOffset)); 329e098bc96SEvan Quan adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; 330e098bc96SEvan Quan adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); 331e098bc96SEvan Quan 
adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); 332e098bc96SEvan Quan adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); 333e098bc96SEvan Quan adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); 334e098bc96SEvan Quan adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); 335e098bc96SEvan Quan adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); 336e098bc96SEvan Quan if (fan_info->fan.ucFanTableFormat >= 2) 337e098bc96SEvan Quan adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); 338e098bc96SEvan Quan else 339e098bc96SEvan Quan adev->pm.dpm.fan.t_max = 10900; 340e098bc96SEvan Quan adev->pm.dpm.fan.cycle_delay = 100000; 341e098bc96SEvan Quan if (fan_info->fan.ucFanTableFormat >= 3) { 342e098bc96SEvan Quan adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; 343e098bc96SEvan Quan adev->pm.dpm.fan.default_max_fan_pwm = 344e098bc96SEvan Quan le16_to_cpu(fan_info->fan3.usFanPWMMax); 345e098bc96SEvan Quan adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; 346e098bc96SEvan Quan adev->pm.dpm.fan.fan_output_sensitivity = 347e098bc96SEvan Quan le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); 348e098bc96SEvan Quan } 349e098bc96SEvan Quan adev->pm.dpm.fan.ucode_fan_control = true; 350e098bc96SEvan Quan } 351e098bc96SEvan Quan } 352e098bc96SEvan Quan 353e098bc96SEvan Quan /* clock dependancy tables, shedding tables */ 354e098bc96SEvan Quan if (le16_to_cpu(power_info->pplib.usTableSize) >= 355e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { 356e098bc96SEvan Quan if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { 357e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 358e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 359e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); 360e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 361e098bc96SEvan Quan 
dep_table); 362e098bc96SEvan Quan if (ret) { 363e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 364e098bc96SEvan Quan return ret; 365e098bc96SEvan Quan } 366e098bc96SEvan Quan } 367e098bc96SEvan Quan if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { 368e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 369e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 370e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); 371e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 372e098bc96SEvan Quan dep_table); 373e098bc96SEvan Quan if (ret) { 374e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 375e098bc96SEvan Quan return ret; 376e098bc96SEvan Quan } 377e098bc96SEvan Quan } 378e098bc96SEvan Quan if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { 379e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 380e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 381e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); 382e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 383e098bc96SEvan Quan dep_table); 384e098bc96SEvan Quan if (ret) { 385e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 386e098bc96SEvan Quan return ret; 387e098bc96SEvan Quan } 388e098bc96SEvan Quan } 389e098bc96SEvan Quan if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { 390e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 391e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 392e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); 393e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 394e098bc96SEvan Quan dep_table); 395e098bc96SEvan Quan if (ret) { 396e098bc96SEvan Quan 
amdgpu_free_extended_power_table(adev); 397e098bc96SEvan Quan return ret; 398e098bc96SEvan Quan } 399e098bc96SEvan Quan } 400e098bc96SEvan Quan if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { 401e098bc96SEvan Quan ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = 402e098bc96SEvan Quan (ATOM_PPLIB_Clock_Voltage_Limit_Table *) 403e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 404e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); 405e098bc96SEvan Quan if (clk_v->ucNumEntries) { 406e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = 407e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usSclkLow) | 408e098bc96SEvan Quan (clk_v->entries[0].ucSclkHigh << 16); 409e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = 410e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usMclkLow) | 411e098bc96SEvan Quan (clk_v->entries[0].ucMclkHigh << 16); 412e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = 413e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usVddc); 414e098bc96SEvan Quan adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = 415e098bc96SEvan Quan le16_to_cpu(clk_v->entries[0].usVddci); 416e098bc96SEvan Quan } 417e098bc96SEvan Quan } 418e098bc96SEvan Quan if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { 419e098bc96SEvan Quan ATOM_PPLIB_PhaseSheddingLimits_Table *psl = 420e098bc96SEvan Quan (ATOM_PPLIB_PhaseSheddingLimits_Table *) 421e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 422e098bc96SEvan Quan le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); 423e098bc96SEvan Quan ATOM_PPLIB_PhaseSheddingLimits_Record *entry; 424e098bc96SEvan Quan 425e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 426e098bc96SEvan Quan kcalloc(psl->ucNumEntries, 427e098bc96SEvan Quan sizeof(struct amdgpu_phase_shedding_limits_entry), 428e098bc96SEvan Quan GFP_KERNEL); 429e098bc96SEvan Quan if 
(!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { 430e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 431e098bc96SEvan Quan return -ENOMEM; 432e098bc96SEvan Quan } 433e098bc96SEvan Quan 434e098bc96SEvan Quan entry = &psl->entries[0]; 435e098bc96SEvan Quan for (i = 0; i < psl->ucNumEntries; i++) { 436e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = 437e098bc96SEvan Quan le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); 438e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = 439e098bc96SEvan Quan le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); 440e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = 441e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 442e098bc96SEvan Quan entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) 443e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); 444e098bc96SEvan Quan } 445e098bc96SEvan Quan adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = 446e098bc96SEvan Quan psl->ucNumEntries; 447e098bc96SEvan Quan } 448e098bc96SEvan Quan } 449e098bc96SEvan Quan 450e098bc96SEvan Quan /* cac data */ 451e098bc96SEvan Quan if (le16_to_cpu(power_info->pplib.usTableSize) >= 452e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { 453e098bc96SEvan Quan adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); 454e098bc96SEvan Quan adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); 455e098bc96SEvan Quan adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; 456e098bc96SEvan Quan adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); 457e098bc96SEvan Quan if (adev->pm.dpm.tdp_od_limit) 458e098bc96SEvan Quan adev->pm.dpm.power_control = true; 459e098bc96SEvan Quan else 460e098bc96SEvan Quan adev->pm.dpm.power_control = false; 461e098bc96SEvan Quan adev->pm.dpm.tdp_adjustment = 0; 
462e098bc96SEvan Quan adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); 463e098bc96SEvan Quan adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); 464e098bc96SEvan Quan adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); 465e098bc96SEvan Quan if (power_info->pplib5.usCACLeakageTableOffset) { 466e098bc96SEvan Quan ATOM_PPLIB_CAC_Leakage_Table *cac_table = 467e098bc96SEvan Quan (ATOM_PPLIB_CAC_Leakage_Table *) 468e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 469e098bc96SEvan Quan le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); 470e098bc96SEvan Quan ATOM_PPLIB_CAC_Leakage_Record *entry; 471e098bc96SEvan Quan u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); 472e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); 473e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { 474e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 475e098bc96SEvan Quan return -ENOMEM; 476e098bc96SEvan Quan } 477e098bc96SEvan Quan entry = &cac_table->entries[0]; 478e098bc96SEvan Quan for (i = 0; i < cac_table->ucNumEntries; i++) { 479e098bc96SEvan Quan if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 480e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = 481e098bc96SEvan Quan le16_to_cpu(entry->usVddc1); 482e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = 483e098bc96SEvan Quan le16_to_cpu(entry->usVddc2); 484e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = 485e098bc96SEvan Quan le16_to_cpu(entry->usVddc3); 486e098bc96SEvan Quan } else { 487e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = 488e098bc96SEvan Quan le16_to_cpu(entry->usVddc); 489e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = 490e098bc96SEvan Quan 
le32_to_cpu(entry->ulLeakageValue); 491e098bc96SEvan Quan } 492e098bc96SEvan Quan entry = (ATOM_PPLIB_CAC_Leakage_Record *) 493e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); 494e098bc96SEvan Quan } 495e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; 496e098bc96SEvan Quan } 497e098bc96SEvan Quan } 498e098bc96SEvan Quan 499e098bc96SEvan Quan /* ext tables */ 500e098bc96SEvan Quan if (le16_to_cpu(power_info->pplib.usTableSize) >= 501e098bc96SEvan Quan sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 502e098bc96SEvan Quan ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) 503e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 504e098bc96SEvan Quan le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); 505e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && 506e098bc96SEvan Quan ext_hdr->usVCETableOffset) { 507e098bc96SEvan Quan VCEClockInfoArray *array = (VCEClockInfoArray *) 508e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 509e098bc96SEvan Quan le16_to_cpu(ext_hdr->usVCETableOffset) + 1); 510e098bc96SEvan Quan ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = 511e098bc96SEvan Quan (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) 512e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 513e098bc96SEvan Quan le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 514e098bc96SEvan Quan 1 + array->ucNumEntries * sizeof(VCEClockInfo)); 515e098bc96SEvan Quan ATOM_PPLIB_VCE_State_Table *states = 516e098bc96SEvan Quan (ATOM_PPLIB_VCE_State_Table *) 517e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 518e098bc96SEvan Quan le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 519e098bc96SEvan Quan 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + 520e098bc96SEvan Quan 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); 521e098bc96SEvan Quan 
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; 522e098bc96SEvan Quan ATOM_PPLIB_VCE_State_Record *state_entry; 523e098bc96SEvan Quan VCEClockInfo *vce_clk; 524e098bc96SEvan Quan u32 size = limits->numEntries * 525e098bc96SEvan Quan sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); 526e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = 527e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 528e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { 529e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 530e098bc96SEvan Quan return -ENOMEM; 531e098bc96SEvan Quan } 532e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = 533e098bc96SEvan Quan limits->numEntries; 534e098bc96SEvan Quan entry = &limits->entries[0]; 535e098bc96SEvan Quan state_entry = &states->entries[0]; 536e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 537e098bc96SEvan Quan vce_clk = (VCEClockInfo *) 538e098bc96SEvan Quan ((u8 *)&array->entries[0] + 539e098bc96SEvan Quan (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 540e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = 541e098bc96SEvan Quan le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 542e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = 543e098bc96SEvan Quan le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 544e098bc96SEvan Quan adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = 545e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 546e098bc96SEvan Quan entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) 547e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); 548e098bc96SEvan Quan } 549e098bc96SEvan Quan adev->pm.dpm.num_of_vce_states = 550e098bc96SEvan Quan states->numEntries > AMD_MAX_VCE_LEVELS ? 
551e098bc96SEvan Quan AMD_MAX_VCE_LEVELS : states->numEntries; 552e098bc96SEvan Quan for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 553e098bc96SEvan Quan vce_clk = (VCEClockInfo *) 554e098bc96SEvan Quan ((u8 *)&array->entries[0] + 555e098bc96SEvan Quan (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 556e098bc96SEvan Quan adev->pm.dpm.vce_states[i].evclk = 557e098bc96SEvan Quan le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 558e098bc96SEvan Quan adev->pm.dpm.vce_states[i].ecclk = 559e098bc96SEvan Quan le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 560e098bc96SEvan Quan adev->pm.dpm.vce_states[i].clk_idx = 561e098bc96SEvan Quan state_entry->ucClockInfoIndex & 0x3f; 562e098bc96SEvan Quan adev->pm.dpm.vce_states[i].pstate = 563e098bc96SEvan Quan (state_entry->ucClockInfoIndex & 0xc0) >> 6; 564e098bc96SEvan Quan state_entry = (ATOM_PPLIB_VCE_State_Record *) 565e098bc96SEvan Quan ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); 566e098bc96SEvan Quan } 567e098bc96SEvan Quan } 568e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && 569e098bc96SEvan Quan ext_hdr->usUVDTableOffset) { 570e098bc96SEvan Quan UVDClockInfoArray *array = (UVDClockInfoArray *) 571e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 572e098bc96SEvan Quan le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); 573e098bc96SEvan Quan ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = 574e098bc96SEvan Quan (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) 575e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 576e098bc96SEvan Quan le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + 577e098bc96SEvan Quan 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); 578e098bc96SEvan Quan ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; 579e098bc96SEvan Quan u32 size = limits->numEntries * 580e098bc96SEvan Quan sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); 581e098bc96SEvan Quan 
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = 582e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 583e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { 584e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 585e098bc96SEvan Quan return -ENOMEM; 586e098bc96SEvan Quan } 587e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = 588e098bc96SEvan Quan limits->numEntries; 589e098bc96SEvan Quan entry = &limits->entries[0]; 590e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 591e098bc96SEvan Quan UVDClockInfo *uvd_clk = (UVDClockInfo *) 592e098bc96SEvan Quan ((u8 *)&array->entries[0] + 593e098bc96SEvan Quan (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); 594e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = 595e098bc96SEvan Quan le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); 596e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 597e098bc96SEvan Quan le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 598e098bc96SEvan Quan adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 599e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 600e098bc96SEvan Quan entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 601e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 602e098bc96SEvan Quan } 603e098bc96SEvan Quan } 604e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && 605e098bc96SEvan Quan ext_hdr->usSAMUTableOffset) { 606e098bc96SEvan Quan ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = 607e098bc96SEvan Quan (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) 608e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 609e098bc96SEvan Quan le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); 610e098bc96SEvan Quan ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; 
611e098bc96SEvan Quan u32 size = limits->numEntries * 612e098bc96SEvan Quan sizeof(struct amdgpu_clock_voltage_dependency_entry); 613e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = 614e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 615e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { 616e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 617e098bc96SEvan Quan return -ENOMEM; 618e098bc96SEvan Quan } 619e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = 620e098bc96SEvan Quan limits->numEntries; 621e098bc96SEvan Quan entry = &limits->entries[0]; 622e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 623e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = 624e098bc96SEvan Quan le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); 625e098bc96SEvan Quan adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = 626e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 627e098bc96SEvan Quan entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) 628e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); 629e098bc96SEvan Quan } 630e098bc96SEvan Quan } 631e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && 632e098bc96SEvan Quan ext_hdr->usPPMTableOffset) { 633e098bc96SEvan Quan ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) 634e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 635e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPPMTableOffset)); 636e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table = 637e098bc96SEvan Quan kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); 638e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.ppm_table) { 639e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 640e098bc96SEvan Quan return -ENOMEM; 641e098bc96SEvan Quan } 642e098bc96SEvan Quan 
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; 643e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = 644e098bc96SEvan Quan le16_to_cpu(ppm->usCpuCoreNumber); 645e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->platform_tdp = 646e098bc96SEvan Quan le32_to_cpu(ppm->ulPlatformTDP); 647e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = 648e098bc96SEvan Quan le32_to_cpu(ppm->ulSmallACPlatformTDP); 649e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->platform_tdc = 650e098bc96SEvan Quan le32_to_cpu(ppm->ulPlatformTDC); 651e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = 652e098bc96SEvan Quan le32_to_cpu(ppm->ulSmallACPlatformTDC); 653e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->apu_tdp = 654e098bc96SEvan Quan le32_to_cpu(ppm->ulApuTDP); 655e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = 656e098bc96SEvan Quan le32_to_cpu(ppm->ulDGpuTDP); 657e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = 658e098bc96SEvan Quan le32_to_cpu(ppm->ulDGpuUlvPower); 659e098bc96SEvan Quan adev->pm.dpm.dyn_state.ppm_table->tj_max = 660e098bc96SEvan Quan le32_to_cpu(ppm->ulTjmax); 661e098bc96SEvan Quan } 662e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && 663e098bc96SEvan Quan ext_hdr->usACPTableOffset) { 664e098bc96SEvan Quan ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = 665e098bc96SEvan Quan (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) 666e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 667e098bc96SEvan Quan le16_to_cpu(ext_hdr->usACPTableOffset) + 1); 668e098bc96SEvan Quan ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; 669e098bc96SEvan Quan u32 size = limits->numEntries * 670e098bc96SEvan Quan sizeof(struct amdgpu_clock_voltage_dependency_entry); 671e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = 672e098bc96SEvan Quan kzalloc(size, GFP_KERNEL); 
673e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { 674e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 675e098bc96SEvan Quan return -ENOMEM; 676e098bc96SEvan Quan } 677e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = 678e098bc96SEvan Quan limits->numEntries; 679e098bc96SEvan Quan entry = &limits->entries[0]; 680e098bc96SEvan Quan for (i = 0; i < limits->numEntries; i++) { 681e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = 682e098bc96SEvan Quan le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); 683e098bc96SEvan Quan adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = 684e098bc96SEvan Quan le16_to_cpu(entry->usVoltage); 685e098bc96SEvan Quan entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) 686e098bc96SEvan Quan ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); 687e098bc96SEvan Quan } 688e098bc96SEvan Quan } 689e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && 690e098bc96SEvan Quan ext_hdr->usPowerTuneTableOffset) { 691e098bc96SEvan Quan u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + 692e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 693e098bc96SEvan Quan ATOM_PowerTune_Table *pt; 694e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table = 695e098bc96SEvan Quan kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); 696e098bc96SEvan Quan if (!adev->pm.dpm.dyn_state.cac_tdp_table) { 697e098bc96SEvan Quan amdgpu_free_extended_power_table(adev); 698e098bc96SEvan Quan return -ENOMEM; 699e098bc96SEvan Quan } 700e098bc96SEvan Quan if (rev > 0) { 701e098bc96SEvan Quan ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) 702e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 703e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 704e098bc96SEvan Quan 
adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 705e098bc96SEvan Quan ppt->usMaximumPowerDeliveryLimit; 706e098bc96SEvan Quan pt = &ppt->power_tune_table; 707e098bc96SEvan Quan } else { 708e098bc96SEvan Quan ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) 709e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 710e098bc96SEvan Quan le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 711e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; 712e098bc96SEvan Quan pt = &ppt->power_tune_table; 713e098bc96SEvan Quan } 714e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); 715e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = 716e098bc96SEvan Quan le16_to_cpu(pt->usConfigurableTDP); 717e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); 718e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = 719e098bc96SEvan Quan le16_to_cpu(pt->usBatteryPowerLimit); 720e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = 721e098bc96SEvan Quan le16_to_cpu(pt->usSmallPowerLimit); 722e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = 723e098bc96SEvan Quan le16_to_cpu(pt->usLowCACLeakage); 724e098bc96SEvan Quan adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = 725e098bc96SEvan Quan le16_to_cpu(pt->usHighCACLeakage); 726e098bc96SEvan Quan } 727e098bc96SEvan Quan if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && 728e098bc96SEvan Quan ext_hdr->usSclkVddgfxTableOffset) { 729e098bc96SEvan Quan dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 730e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset + 731e098bc96SEvan Quan le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); 732e098bc96SEvan Quan ret = amdgpu_parse_clk_voltage_dep_table( 733e098bc96SEvan Quan 
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
734e098bc96SEvan Quan dep_table);
735e098bc96SEvan Quan if (ret) {
736e098bc96SEvan Quan kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
737e098bc96SEvan Quan return ret;
738e098bc96SEvan Quan }
739e098bc96SEvan Quan }
740e098bc96SEvan Quan }
741e098bc96SEvan Quan
742e098bc96SEvan Quan return 0;
743e098bc96SEvan Quan }
744e098bc96SEvan Quan
/*
 * Free every dynamic-state table allocated while parsing the extended
 * (ATOM_PPLIB) power table.  Each pointer is either kzalloc'd by the
 * parser above or still NULL, and kfree(NULL) is a no-op, so this is
 * safe on a partially-parsed table — it doubles as the parser's
 * -ENOMEM error path.
 */
745e098bc96SEvan Quan void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
746e098bc96SEvan Quan {
747e098bc96SEvan Quan struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
748e098bc96SEvan Quan
749e098bc96SEvan Quan kfree(dyn_state->vddc_dependency_on_sclk.entries);
750e098bc96SEvan Quan kfree(dyn_state->vddci_dependency_on_mclk.entries);
751e098bc96SEvan Quan kfree(dyn_state->vddc_dependency_on_mclk.entries);
752e098bc96SEvan Quan kfree(dyn_state->mvdd_dependency_on_mclk.entries);
753e098bc96SEvan Quan kfree(dyn_state->cac_leakage_table.entries);
754e098bc96SEvan Quan kfree(dyn_state->phase_shedding_limits_table.entries);
755e098bc96SEvan Quan kfree(dyn_state->ppm_table);
756e098bc96SEvan Quan kfree(dyn_state->cac_tdp_table);
757e098bc96SEvan Quan kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
758e098bc96SEvan Quan kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
759e098bc96SEvan Quan kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
760e098bc96SEvan Quan kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
761e098bc96SEvan Quan kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
762e098bc96SEvan Quan }
763e098bc96SEvan Quan
/* Controller name strings, indexed by the controller->ucType id below. */
764e098bc96SEvan Quan static const char *pp_lib_thermal_controller_names[] = {
765e098bc96SEvan Quan "NONE",
766e098bc96SEvan Quan "lm63",
767e098bc96SEvan Quan "adm1032",
768e098bc96SEvan Quan "adm1030",
769e098bc96SEvan Quan "max6649",
770e098bc96SEvan Quan "lm64",
771e098bc96SEvan Quan "f75375",
772e098bc96SEvan Quan "RV6xx",
773e098bc96SEvan Quan "RV770",
774e098bc96SEvan Quan "adt7473",
775e098bc96SEvan Quan "NONE",
776e098bc96SEvan Quan "External GPIO",
777e098bc96SEvan Quan "Evergreen",
778e098bc96SEvan Quan "emc2103",
779e098bc96SEvan Quan "Sumo",
780e098bc96SEvan Quan "Northern Islands",
781e098bc96SEvan Quan "Southern Islands",
782e098bc96SEvan Quan "lm96163",
783e098bc96SEvan Quan "Sea Islands",
784e098bc96SEvan Quan "Kaveri/Kabini",
785e098bc96SEvan Quan };
786e098bc96SEvan Quan
/*
 * Parse the thermal controller section of the PowerPlayInfo ATOM table:
 * record fan capabilities (no_fan, tach pulses per revolution, min/max
 * RPM), classify the controller into adev->pm.int_thermal_type, and for
 * externally attached I2C controllers register an i2c client device on
 * the looked-up bus.  Returns silently if the table header cannot be
 * parsed.
 */
787e098bc96SEvan Quan void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
788e098bc96SEvan Quan {
789e098bc96SEvan Quan struct amdgpu_mode_info *mode_info = &adev->mode_info;
790e098bc96SEvan Quan ATOM_PPLIB_POWERPLAYTABLE *power_table;
791e098bc96SEvan Quan int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
792e098bc96SEvan Quan ATOM_PPLIB_THERMALCONTROLLER *controller;
793e098bc96SEvan Quan struct amdgpu_i2c_bus_rec i2c_bus;
794e098bc96SEvan Quan u16 data_offset;
795e098bc96SEvan Quan u8 frev, crev;
796e098bc96SEvan Quan
797e098bc96SEvan Quan if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
798e098bc96SEvan Quan &frev, &crev, &data_offset))
799e098bc96SEvan Quan return;
800e098bc96SEvan Quan power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
801e098bc96SEvan Quan (mode_info->atom_context->bios + data_offset);
802e098bc96SEvan Quan controller = &power_table->sThermalController;
803e098bc96SEvan Quan
804e098bc96SEvan Quan /* add the i2c bus for thermal/fan chip */
805e098bc96SEvan Quan if (controller->ucType > 0) {
806e098bc96SEvan Quan if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
807e098bc96SEvan Quan adev->pm.no_fan = true;
808e098bc96SEvan Quan adev->pm.fan_pulses_per_revolution =
809e098bc96SEvan Quan controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
810e098bc96SEvan Quan if (adev->pm.fan_pulses_per_revolution) {
811e098bc96SEvan Quan adev->pm.fan_min_rpm = controller->ucFanMinRPM;
812e098bc96SEvan Quan adev->pm.fan_max_rpm =
controller->ucFanMaxRPM;
813e098bc96SEvan Quan }
/* Map controller->ucType to the matching THERMAL_TYPE_* classification. */
814e098bc96SEvan Quan if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
815e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
816e098bc96SEvan Quan (controller->ucFanParameters &
817e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
818e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
819e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
820e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
821e098bc96SEvan Quan (controller->ucFanParameters &
822e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
823e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
824e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
825e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
826e098bc96SEvan Quan (controller->ucFanParameters &
827e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
828e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
829e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
830e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
831e098bc96SEvan Quan (controller->ucFanParameters &
832e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
833e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
834e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
835e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
836e098bc96SEvan Quan (controller->ucFanParameters &
837e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ?
"without" : "with");
838e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_NI;
839e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
840e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
841e098bc96SEvan Quan (controller->ucFanParameters &
842e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
843e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_SI;
844e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
845e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
846e098bc96SEvan Quan (controller->ucFanParameters &
847e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
848e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_CI;
849e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
850e098bc96SEvan Quan DRM_INFO("Internal thermal controller %s fan control\n",
851e098bc96SEvan Quan (controller->ucFanParameters &
852e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
853e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_KV;
854e098bc96SEvan Quan } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
855e098bc96SEvan Quan DRM_INFO("External GPIO thermal controller %s fan control\n",
856e098bc96SEvan Quan (controller->ucFanParameters &
857e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
858e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
859e098bc96SEvan Quan } else if (controller->ucType ==
860e098bc96SEvan Quan ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
861e098bc96SEvan Quan DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
862e098bc96SEvan Quan (controller->ucFanParameters &
863e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ?
"without" : "with");
864e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
865e098bc96SEvan Quan } else if (controller->ucType ==
866e098bc96SEvan Quan ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
867e098bc96SEvan Quan DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
868e098bc96SEvan Quan (controller->ucFanParameters &
869e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
870e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
/* Unhandled but in-range type: treat as an external I2C chip and probe it. */
871e098bc96SEvan Quan } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
872e098bc96SEvan Quan DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
873e098bc96SEvan Quan pp_lib_thermal_controller_names[controller->ucType],
874e098bc96SEvan Quan controller->ucI2cAddress >> 1,
875e098bc96SEvan Quan (controller->ucFanParameters &
876e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
877e098bc96SEvan Quan adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
878e098bc96SEvan Quan i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
879e098bc96SEvan Quan adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
880e098bc96SEvan Quan if (adev->pm.i2c_bus) {
881e098bc96SEvan Quan struct i2c_board_info info = { };
882e098bc96SEvan Quan const char *name = pp_lib_thermal_controller_names[controller->ucType];
883e098bc96SEvan Quan info.addr = controller->ucI2cAddress >> 1;
884e098bc96SEvan Quan strlcpy(info.type, name, sizeof(info.type));
885e098bc96SEvan Quan i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
886e098bc96SEvan Quan }
887e098bc96SEvan Quan } else {
888e098bc96SEvan Quan DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
889e098bc96SEvan Quan controller->ucType,
890e098bc96SEvan Quan controller->ucI2cAddress >> 1,
891e098bc96SEvan Quan (controller->ucFanParameters &
892e098bc96SEvan Quan ATOM_PP_FANPARAMETERS_NOFAN) ?
"without" : "with");
893e098bc96SEvan Quan }
894e098bc96SEvan Quan }
895e098bc96SEvan Quan }
896e098bc96SEvan Quan
/*
 * Resolve which PCIe gen to use: an explicit asic_gen (GEN1..GEN3) wins
 * outright; otherwise honor default_gen only if the system link-speed
 * mask also supports it, falling back to GEN1.
 */
897e098bc96SEvan Quan enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
898e098bc96SEvan Quan u32 sys_mask,
899e098bc96SEvan Quan enum amdgpu_pcie_gen asic_gen,
900e098bc96SEvan Quan enum amdgpu_pcie_gen default_gen)
901e098bc96SEvan Quan {
902e098bc96SEvan Quan switch (asic_gen) {
903e098bc96SEvan Quan case AMDGPU_PCIE_GEN1:
904e098bc96SEvan Quan return AMDGPU_PCIE_GEN1;
905e098bc96SEvan Quan case AMDGPU_PCIE_GEN2:
906e098bc96SEvan Quan return AMDGPU_PCIE_GEN2;
907e098bc96SEvan Quan case AMDGPU_PCIE_GEN3:
908e098bc96SEvan Quan return AMDGPU_PCIE_GEN3;
909e098bc96SEvan Quan default:
910e098bc96SEvan Quan if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
911e098bc96SEvan Quan (default_gen == AMDGPU_PCIE_GEN3))
912e098bc96SEvan Quan return AMDGPU_PCIE_GEN3;
913e098bc96SEvan Quan else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
914e098bc96SEvan Quan (default_gen == AMDGPU_PCIE_GEN2))
915e098bc96SEvan Quan return AMDGPU_PCIE_GEN2;
916e098bc96SEvan Quan else
917e098bc96SEvan Quan return AMDGPU_PCIE_GEN1;
918e098bc96SEvan Quan }
/* NOTE(review): unreachable — every switch path above already returns. */
919e098bc96SEvan Quan return AMDGPU_PCIE_GEN1;
920e098bc96SEvan Quan }
921e098bc96SEvan Quan
/* Return the idx-th cached VCE clock state, or NULL when out of range. */
922e098bc96SEvan Quan struct amd_vce_state*
923e098bc96SEvan Quan amdgpu_get_vce_clock_state(void *handle, u32 idx)
924e098bc96SEvan Quan {
925e098bc96SEvan Quan struct amdgpu_device *adev = (struct amdgpu_device *)handle;
926e098bc96SEvan Quan
927e098bc96SEvan Quan if (idx < adev->pm.dpm.num_of_vce_states)
928e098bc96SEvan Quan return &adev->pm.dpm.vce_states[idx];
929e098bc96SEvan Quan
930e098bc96SEvan Quan return NULL;
931e098bc96SEvan Quan }
932e098bc96SEvan Quan
/*
 * Query the current sclk through the powerplay backend.
 * NOTE(review): unlike most wrappers below, this does not NULL-check
 * pp_funcs or pp_funcs->get_sclk before calling — verify callers only
 * run with a registered backend.
 */
933e098bc96SEvan Quan int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
934e098bc96SEvan Quan {
935bc7d6c12SDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
936e098bc96SEvan Quan
937bc7d6c12SDarren
Powell return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
938e098bc96SEvan Quan }
939e098bc96SEvan Quan
/*
 * Query the current mclk through the powerplay backend.
 * NOTE(review): same missing-callback caveat as amdgpu_dpm_get_sclk().
 */
940e098bc96SEvan Quan int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
941e098bc96SEvan Quan {
942bc7d6c12SDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
943e098bc96SEvan Quan
944bc7d6c12SDarren Powell return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
945e098bc96SEvan Quan }
946e098bc96SEvan Quan
/*
 * Gate/ungate power for one IP block through the SMU.  A per-block
 * cached state (adev->pm.pwr_state[]) short-circuits repeated requests
 * for the same target state; on success the cache is updated.
 * adev->pm.mutex is taken for UVD/VCE only — see the deadlock note in
 * the body.
 */
947e098bc96SEvan Quan int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
948e098bc96SEvan Quan {
949e098bc96SEvan Quan int ret = 0;
950bc7d6c12SDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
9516ee27ee2SEvan Quan enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
9526ee27ee2SEvan Quan
9536ee27ee2SEvan Quan if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
9546ee27ee2SEvan Quan dev_dbg(adev->dev, "IP block%d already in the target %s state!",
9556ee27ee2SEvan Quan block_type, gate ? "gate" : "ungate");
9566ee27ee2SEvan Quan return 0;
9576ee27ee2SEvan Quan }
958e098bc96SEvan Quan
959e098bc96SEvan Quan switch (block_type) {
960e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_UVD:
961e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_VCE:
962bc7d6c12SDarren Powell if (pp_funcs && pp_funcs->set_powergating_by_smu) {
963e098bc96SEvan Quan /*
964e098bc96SEvan Quan * TODO: need a better lock mechanism
965e098bc96SEvan Quan *
966e098bc96SEvan Quan * Here adev->pm.mutex lock protection is enforced on
967e098bc96SEvan Quan * UVD and VCE cases only. Since for other cases, there
968e098bc96SEvan Quan * may be already lock protection in amdgpu_pm.c.
969e098bc96SEvan Quan * This is a quick fix for the deadlock issue below.
970e098bc96SEvan Quan * NFO: task ocltst:2028 blocked for more than 120 seconds.
971e098bc96SEvan Quan * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
972e098bc96SEvan Quan * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
973e098bc96SEvan Quan * cltst D 0 2028 2026 0x00000000
974e098bc96SEvan Quan * all Trace:
975e098bc96SEvan Quan * __schedule+0x2c0/0x870
976e098bc96SEvan Quan * schedule+0x2c/0x70
977e098bc96SEvan Quan * schedule_preempt_disabled+0xe/0x10
978e098bc96SEvan Quan * __mutex_lock.isra.9+0x26d/0x4e0
979e098bc96SEvan Quan * __mutex_lock_slowpath+0x13/0x20
980e098bc96SEvan Quan * ? __mutex_lock_slowpath+0x13/0x20
981e098bc96SEvan Quan * mutex_lock+0x2f/0x40
982e098bc96SEvan Quan * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
983e098bc96SEvan Quan * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
984e098bc96SEvan Quan * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
985e098bc96SEvan Quan * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
986e098bc96SEvan Quan * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
987e098bc96SEvan Quan * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
988e098bc96SEvan Quan */
989e098bc96SEvan Quan mutex_lock(&adev->pm.mutex);
990bc7d6c12SDarren Powell ret = (pp_funcs->set_powergating_by_smu(
991e098bc96SEvan Quan (adev)->powerplay.pp_handle, block_type, gate));
992e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex);
993e098bc96SEvan Quan }
994e098bc96SEvan Quan break;
995e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_GFX:
996e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_VCN:
997e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_SDMA:
998e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_JPEG:
999e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_GMC:
1000e098bc96SEvan Quan case AMD_IP_BLOCK_TYPE_ACP:
1001bc7d6c12SDarren Powell if (pp_funcs && pp_funcs->set_powergating_by_smu) {
1002bc7d6c12SDarren Powell ret = (pp_funcs->set_powergating_by_smu(
1003e098bc96SEvan Quan (adev)->powerplay.pp_handle, block_type, gate));
1004bc7d6c12SDarren Powell }
1005e098bc96SEvan Quan
break;
1006e098bc96SEvan Quan default:
1007e098bc96SEvan Quan break;
1008e098bc96SEvan Quan }
1009e098bc96SEvan Quan
/* Only remember the new state when the SMU call (or no-op) succeeded. */
10106ee27ee2SEvan Quan if (!ret)
10116ee27ee2SEvan Quan atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
10126ee27ee2SEvan Quan
1013e098bc96SEvan Quan return ret;
1014e098bc96SEvan Quan }
1015e098bc96SEvan Quan
/*
 * Enter the BACO state.  Returns -ENOENT when the backend does not
 * implement set_asic_baco_state, otherwise the backend's result.
 */
1016e098bc96SEvan Quan int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
1017e098bc96SEvan Quan {
1018e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1019e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1020e098bc96SEvan Quan int ret = 0;
1021e098bc96SEvan Quan
1022e098bc96SEvan Quan if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1023e098bc96SEvan Quan return -ENOENT;
1024e098bc96SEvan Quan
1025e098bc96SEvan Quan /* enter BACO state */
1026e098bc96SEvan Quan ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1027e098bc96SEvan Quan
1028e098bc96SEvan Quan return ret;
1029e098bc96SEvan Quan }
1030e098bc96SEvan Quan
/* Leave the BACO state.  Counterpart of amdgpu_dpm_baco_enter(). */
1031e098bc96SEvan Quan int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1032e098bc96SEvan Quan {
1033e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1034e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1035e098bc96SEvan Quan int ret = 0;
1036e098bc96SEvan Quan
1037e098bc96SEvan Quan if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1038e098bc96SEvan Quan return -ENOENT;
1039e098bc96SEvan Quan
1040e098bc96SEvan Quan /* exit BACO state */
1041e098bc96SEvan Quan ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1042e098bc96SEvan Quan
1043e098bc96SEvan Quan return ret;
1044e098bc96SEvan Quan }
1045e098bc96SEvan Quan
/* Forward an MP1 state change to the backend; returns 0 if unsupported. */
1046e098bc96SEvan Quan int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1047e098bc96SEvan Quan enum pp_mp1_state mp1_state)
1048e098bc96SEvan Quan {
1049e098bc96SEvan Quan int ret = 0;
1050bab0f602SDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1051e098bc96SEvan Quan
1052bab0f602SDarren Powell if (pp_funcs && pp_funcs->set_mp1_state) {
1053bab0f602SDarren Powell ret = pp_funcs->set_mp1_state(
1054e098bc96SEvan Quan adev->powerplay.pp_handle,
1055e098bc96SEvan Quan mp1_state);
1056e098bc96SEvan Quan }
1057e098bc96SEvan Quan
1058e098bc96SEvan Quan return ret;
1059e098bc96SEvan Quan }
1060e098bc96SEvan Quan
/*
 * True only when the backend implements get_asic_baco_capability AND
 * the query itself succeeds; any failure is reported as "unsupported".
 */
1061e098bc96SEvan Quan bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1062e098bc96SEvan Quan {
1063e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1064e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1065e098bc96SEvan Quan bool baco_cap;
1066e098bc96SEvan Quan
1067e098bc96SEvan Quan if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1068e098bc96SEvan Quan return false;
1069e098bc96SEvan Quan
1070e098bc96SEvan Quan if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1071e098bc96SEvan Quan return false;
1072e098bc96SEvan Quan
10739ab5001aSDarren Powell return baco_cap;
1074e098bc96SEvan Quan }
1075e098bc96SEvan Quan
/* Request an ASIC mode-2 reset; -ENOENT when not implemented. */
1076e098bc96SEvan Quan int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1077e098bc96SEvan Quan {
1078e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1079e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1080e098bc96SEvan Quan
1081e098bc96SEvan Quan if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1082e098bc96SEvan Quan return -ENOENT;
1083e098bc96SEvan Quan
1084e098bc96SEvan Quan return pp_funcs->asic_reset_mode_2(pp_handle);
1085e098bc96SEvan Quan }
1086e098bc96SEvan Quan
/* Full BACO reset cycle: enter then exit; the first failure is returned. */
1087e098bc96SEvan Quan int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1088e098bc96SEvan Quan {
1089e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1090e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1091e098bc96SEvan Quan int ret = 0;
1092e098bc96SEvan Quan
10939ab5001aSDarren Powell if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1094e098bc96SEvan Quan return
-ENOENT;
1095e098bc96SEvan Quan
1096e098bc96SEvan Quan /* enter BACO state */
1097e098bc96SEvan Quan ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1098e098bc96SEvan Quan if (ret)
1099e098bc96SEvan Quan return ret;
1100e098bc96SEvan Quan
1101e098bc96SEvan Quan /* exit BACO state */
1102e098bc96SEvan Quan ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1103e098bc96SEvan Quan if (ret)
1104e098bc96SEvan Quan return ret;
1105e098bc96SEvan Quan
1106e098bc96SEvan Quan return 0;
1107e098bc96SEvan Quan }
1108e098bc96SEvan Quan
/* Mode-1 reset support is only ever reported for SW-SMU parts. */
1109e098bc96SEvan Quan bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1110e098bc96SEvan Quan {
1111e098bc96SEvan Quan struct smu_context *smu = &adev->smu;
1112e098bc96SEvan Quan
1113e098bc96SEvan Quan if (is_support_sw_smu(adev))
1114e098bc96SEvan Quan return smu_mode1_reset_is_support(smu);
1115e098bc96SEvan Quan
1116e098bc96SEvan Quan return false;
1117e098bc96SEvan Quan }
1118e098bc96SEvan Quan
/* Perform a mode-1 reset via SW SMU; -EOPNOTSUPP on non-SW-SMU parts. */
1119e098bc96SEvan Quan int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1120e098bc96SEvan Quan {
1121e098bc96SEvan Quan struct smu_context *smu = &adev->smu;
1122e098bc96SEvan Quan
1123e098bc96SEvan Quan if (is_support_sw_smu(adev))
1124e098bc96SEvan Quan return smu_mode1_reset(smu);
1125e098bc96SEvan Quan
1126e098bc96SEvan Quan return -EOPNOTSUPP;
1127e098bc96SEvan Quan }
1128e098bc96SEvan Quan
/*
 * Enable/disable an SMC power profile.  Silently returns 0 under
 * SR-IOV (the VF has no control) or when the backend lacks the hook.
 */
1129e098bc96SEvan Quan int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1130e098bc96SEvan Quan enum PP_SMC_POWER_PROFILE type,
1131e098bc96SEvan Quan bool en)
1132e098bc96SEvan Quan {
1133bab0f602SDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1134e098bc96SEvan Quan int ret = 0;
1135e098bc96SEvan Quan
11367cf7a392SJingwen Chen if (amdgpu_sriov_vf(adev))
11377cf7a392SJingwen Chen return 0;
11387cf7a392SJingwen Chen
1139bab0f602SDarren Powell if (pp_funcs && pp_funcs->switch_power_profile)
1140bab0f602SDarren Powell ret = pp_funcs->switch_power_profile(
1141e098bc96SEvan Quan
adev->powerplay.pp_handle, type, en);
1142e098bc96SEvan Quan
1143e098bc96SEvan Quan return ret;
1144e098bc96SEvan Quan }
1145e098bc96SEvan Quan
/* Set the XGMI pstate through the backend; 0 when the hook is absent. */
1146e098bc96SEvan Quan int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1147e098bc96SEvan Quan uint32_t pstate)
1148e098bc96SEvan Quan {
1149bab0f602SDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1150e098bc96SEvan Quan int ret = 0;
1151e098bc96SEvan Quan
1152bab0f602SDarren Powell if (pp_funcs && pp_funcs->set_xgmi_pstate)
1153bab0f602SDarren Powell ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1154e098bc96SEvan Quan pstate);
1155e098bc96SEvan Quan
1156e098bc96SEvan Quan return ret;
1157e098bc96SEvan Quan }
1158e098bc96SEvan Quan
/* Forward a DF cstate request to the backend; 0 when unsupported. */
1159e098bc96SEvan Quan int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1160e098bc96SEvan Quan uint32_t cstate)
1161e098bc96SEvan Quan {
1162e098bc96SEvan Quan int ret = 0;
1163e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1164e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1165e098bc96SEvan Quan
1166bab0f602SDarren Powell if (pp_funcs && pp_funcs->set_df_cstate)
1167e098bc96SEvan Quan ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1168e098bc96SEvan Quan
1169e098bc96SEvan Quan return ret;
1170e098bc96SEvan Quan }
1171e098bc96SEvan Quan
/* Allow/forbid XGMI power down; SW-SMU parts only, otherwise 0. */
1172e098bc96SEvan Quan int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1173e098bc96SEvan Quan {
1174e098bc96SEvan Quan struct smu_context *smu = &adev->smu;
1175e098bc96SEvan Quan
1176e098bc96SEvan Quan if (is_support_sw_smu(adev))
1177e098bc96SEvan Quan return smu_allow_xgmi_power_down(smu, en);
1178e098bc96SEvan Quan
1179e098bc96SEvan Quan return 0;
1180e098bc96SEvan Quan }
1181e098bc96SEvan Quan
/* Enable fan boost for multi-GPU configurations, if the backend supports it. */
1182e098bc96SEvan Quan int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1183e098bc96SEvan Quan {
1184e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle;
1185e098bc96SEvan Quan const struct amd_pm_funcs
*pp_funcs = 1186e098bc96SEvan Quan adev->powerplay.pp_funcs; 1187e098bc96SEvan Quan int ret = 0; 1188e098bc96SEvan Quan 1189bab0f602SDarren Powell if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) 1190e098bc96SEvan Quan ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 1191e098bc96SEvan Quan 1192e098bc96SEvan Quan return ret; 1193e098bc96SEvan Quan } 1194e098bc96SEvan Quan 1195e098bc96SEvan Quan int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 1196e098bc96SEvan Quan uint32_t msg_id) 1197e098bc96SEvan Quan { 1198e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle; 1199e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = 1200e098bc96SEvan Quan adev->powerplay.pp_funcs; 1201e098bc96SEvan Quan int ret = 0; 1202e098bc96SEvan Quan 1203e098bc96SEvan Quan if (pp_funcs && pp_funcs->set_clockgating_by_smu) 1204e098bc96SEvan Quan ret = pp_funcs->set_clockgating_by_smu(pp_handle, 1205e098bc96SEvan Quan msg_id); 1206e098bc96SEvan Quan 1207e098bc96SEvan Quan return ret; 1208e098bc96SEvan Quan } 1209e098bc96SEvan Quan 1210e098bc96SEvan Quan int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 1211e098bc96SEvan Quan bool acquire) 1212e098bc96SEvan Quan { 1213e098bc96SEvan Quan void *pp_handle = adev->powerplay.pp_handle; 1214e098bc96SEvan Quan const struct amd_pm_funcs *pp_funcs = 1215e098bc96SEvan Quan adev->powerplay.pp_funcs; 1216e098bc96SEvan Quan int ret = -EOPNOTSUPP; 1217e098bc96SEvan Quan 1218e098bc96SEvan Quan if (pp_funcs && pp_funcs->smu_i2c_bus_access) 1219e098bc96SEvan Quan ret = pp_funcs->smu_i2c_bus_access(pp_handle, 1220e098bc96SEvan Quan acquire); 1221e098bc96SEvan Quan 1222e098bc96SEvan Quan return ret; 1223e098bc96SEvan Quan } 1224e098bc96SEvan Quan 1225e098bc96SEvan Quan void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 1226e098bc96SEvan Quan { 1227e098bc96SEvan Quan if (adev->pm.dpm_enabled) { 1228e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1229e098bc96SEvan Quan if (power_supply_is_system_supplied() > 0) 
1230e098bc96SEvan Quan adev->pm.ac_power = true; 1231e098bc96SEvan Quan else 1232e098bc96SEvan Quan adev->pm.ac_power = false; 1233e098bc96SEvan Quan if (adev->powerplay.pp_funcs && 1234e098bc96SEvan Quan adev->powerplay.pp_funcs->enable_bapm) 1235e098bc96SEvan Quan amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 1236e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1237e098bc96SEvan Quan 1238e098bc96SEvan Quan if (is_support_sw_smu(adev)) 1239e098bc96SEvan Quan smu_set_ac_dc(&adev->smu); 1240e098bc96SEvan Quan } 1241e098bc96SEvan Quan } 1242e098bc96SEvan Quan 1243e098bc96SEvan Quan int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 1244e098bc96SEvan Quan void *data, uint32_t *size) 1245e098bc96SEvan Quan { 12469ab5001aSDarren Powell const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1247e098bc96SEvan Quan int ret = 0; 1248e098bc96SEvan Quan 1249e098bc96SEvan Quan if (!data || !size) 1250e098bc96SEvan Quan return -EINVAL; 1251e098bc96SEvan Quan 12529ab5001aSDarren Powell if (pp_funcs && pp_funcs->read_sensor) 12539ab5001aSDarren Powell ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle, 1254e098bc96SEvan Quan sensor, data, size); 1255e098bc96SEvan Quan else 1256e098bc96SEvan Quan ret = -EINVAL; 1257e098bc96SEvan Quan 1258e098bc96SEvan Quan return ret; 1259e098bc96SEvan Quan } 1260e098bc96SEvan Quan 1261e098bc96SEvan Quan void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 1262e098bc96SEvan Quan { 1263e098bc96SEvan Quan struct amdgpu_device *adev = 1264e098bc96SEvan Quan container_of(work, struct amdgpu_device, 1265e098bc96SEvan Quan pm.dpm.thermal.work); 1266e098bc96SEvan Quan /* switch to the thermal state */ 1267e098bc96SEvan Quan enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1268e098bc96SEvan Quan int temp, size = sizeof(temp); 1269e098bc96SEvan Quan 1270e098bc96SEvan Quan if (!adev->pm.dpm_enabled) 1271e098bc96SEvan Quan return; 1272e098bc96SEvan Quan 1273e098bc96SEvan Quan if 
(!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 1274e098bc96SEvan Quan (void *)&temp, &size)) { 1275e098bc96SEvan Quan if (temp < adev->pm.dpm.thermal.min_temp) 1276e098bc96SEvan Quan /* switch back the user state */ 1277e098bc96SEvan Quan dpm_state = adev->pm.dpm.user_state; 1278e098bc96SEvan Quan } else { 1279e098bc96SEvan Quan if (adev->pm.dpm.thermal.high_to_low) 1280e098bc96SEvan Quan /* switch back the user state */ 1281e098bc96SEvan Quan dpm_state = adev->pm.dpm.user_state; 1282e098bc96SEvan Quan } 1283e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1284e098bc96SEvan Quan if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 1285e098bc96SEvan Quan adev->pm.dpm.thermal_active = true; 1286e098bc96SEvan Quan else 1287e098bc96SEvan Quan adev->pm.dpm.thermal_active = false; 1288e098bc96SEvan Quan adev->pm.dpm.state = dpm_state; 1289e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1290e098bc96SEvan Quan 1291e098bc96SEvan Quan amdgpu_pm_compute_clocks(adev); 1292e098bc96SEvan Quan } 1293e098bc96SEvan Quan 1294e098bc96SEvan Quan static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 1295e098bc96SEvan Quan enum amd_pm_state_type dpm_state) 1296e098bc96SEvan Quan { 1297e098bc96SEvan Quan int i; 1298e098bc96SEvan Quan struct amdgpu_ps *ps; 1299e098bc96SEvan Quan u32 ui_class; 1300e098bc96SEvan Quan bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 
1301e098bc96SEvan Quan true : false; 1302e098bc96SEvan Quan 1303e098bc96SEvan Quan /* check if the vblank period is too short to adjust the mclk */ 1304e098bc96SEvan Quan if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { 1305e098bc96SEvan Quan if (amdgpu_dpm_vblank_too_short(adev)) 1306e098bc96SEvan Quan single_display = false; 1307e098bc96SEvan Quan } 1308e098bc96SEvan Quan 1309e098bc96SEvan Quan /* certain older asics have a separare 3D performance state, 1310e098bc96SEvan Quan * so try that first if the user selected performance 1311e098bc96SEvan Quan */ 1312e098bc96SEvan Quan if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 1313e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 1314e098bc96SEvan Quan /* balanced states don't exist at the moment */ 1315e098bc96SEvan Quan if (dpm_state == POWER_STATE_TYPE_BALANCED) 1316e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1317e098bc96SEvan Quan 1318e098bc96SEvan Quan restart_search: 1319e098bc96SEvan Quan /* Pick the best power state based on current conditions */ 1320e098bc96SEvan Quan for (i = 0; i < adev->pm.dpm.num_ps; i++) { 1321e098bc96SEvan Quan ps = &adev->pm.dpm.ps[i]; 1322e098bc96SEvan Quan ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 1323e098bc96SEvan Quan switch (dpm_state) { 1324e098bc96SEvan Quan /* user states */ 1325e098bc96SEvan Quan case POWER_STATE_TYPE_BATTERY: 1326e098bc96SEvan Quan if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 1327e098bc96SEvan Quan if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1328e098bc96SEvan Quan if (single_display) 1329e098bc96SEvan Quan return ps; 1330e098bc96SEvan Quan } else 1331e098bc96SEvan Quan return ps; 1332e098bc96SEvan Quan } 1333e098bc96SEvan Quan break; 1334e098bc96SEvan Quan case POWER_STATE_TYPE_BALANCED: 1335e098bc96SEvan Quan if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 1336e098bc96SEvan Quan if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1337e098bc96SEvan Quan if 
(single_display) 1338e098bc96SEvan Quan return ps; 1339e098bc96SEvan Quan } else 1340e098bc96SEvan Quan return ps; 1341e098bc96SEvan Quan } 1342e098bc96SEvan Quan break; 1343e098bc96SEvan Quan case POWER_STATE_TYPE_PERFORMANCE: 1344e098bc96SEvan Quan if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 1345e098bc96SEvan Quan if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1346e098bc96SEvan Quan if (single_display) 1347e098bc96SEvan Quan return ps; 1348e098bc96SEvan Quan } else 1349e098bc96SEvan Quan return ps; 1350e098bc96SEvan Quan } 1351e098bc96SEvan Quan break; 1352e098bc96SEvan Quan /* internal states */ 1353e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD: 1354e098bc96SEvan Quan if (adev->pm.dpm.uvd_ps) 1355e098bc96SEvan Quan return adev->pm.dpm.uvd_ps; 1356e098bc96SEvan Quan else 1357e098bc96SEvan Quan break; 1358e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1359e098bc96SEvan Quan if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 1360e098bc96SEvan Quan return ps; 1361e098bc96SEvan Quan break; 1362e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1363e098bc96SEvan Quan if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 1364e098bc96SEvan Quan return ps; 1365e098bc96SEvan Quan break; 1366e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1367e098bc96SEvan Quan if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 1368e098bc96SEvan Quan return ps; 1369e098bc96SEvan Quan break; 1370e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1371e098bc96SEvan Quan if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 1372e098bc96SEvan Quan return ps; 1373e098bc96SEvan Quan break; 1374e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_BOOT: 1375e098bc96SEvan Quan return adev->pm.dpm.boot_ps; 1376e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_THERMAL: 1377e098bc96SEvan Quan if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 1378e098bc96SEvan Quan return ps; 1379e098bc96SEvan Quan break; 1380e098bc96SEvan Quan case 
POWER_STATE_TYPE_INTERNAL_ACPI: 1381e098bc96SEvan Quan if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 1382e098bc96SEvan Quan return ps; 1383e098bc96SEvan Quan break; 1384e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_ULV: 1385e098bc96SEvan Quan if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 1386e098bc96SEvan Quan return ps; 1387e098bc96SEvan Quan break; 1388e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_3DPERF: 1389e098bc96SEvan Quan if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 1390e098bc96SEvan Quan return ps; 1391e098bc96SEvan Quan break; 1392e098bc96SEvan Quan default: 1393e098bc96SEvan Quan break; 1394e098bc96SEvan Quan } 1395e098bc96SEvan Quan } 1396e098bc96SEvan Quan /* use a fallback state if we didn't match */ 1397e098bc96SEvan Quan switch (dpm_state) { 1398e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1399e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1400e098bc96SEvan Quan goto restart_search; 1401e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1402e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1403e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1404e098bc96SEvan Quan if (adev->pm.dpm.uvd_ps) { 1405e098bc96SEvan Quan return adev->pm.dpm.uvd_ps; 1406e098bc96SEvan Quan } else { 1407e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1408e098bc96SEvan Quan goto restart_search; 1409e098bc96SEvan Quan } 1410e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_THERMAL: 1411e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 1412e098bc96SEvan Quan goto restart_search; 1413e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_ACPI: 1414e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_BATTERY; 1415e098bc96SEvan Quan goto restart_search; 1416e098bc96SEvan Quan case POWER_STATE_TYPE_BATTERY: 1417e098bc96SEvan Quan case POWER_STATE_TYPE_BALANCED: 1418e098bc96SEvan Quan case POWER_STATE_TYPE_INTERNAL_3DPERF: 1419e098bc96SEvan Quan dpm_state = POWER_STATE_TYPE_PERFORMANCE; 
1420e098bc96SEvan Quan goto restart_search; 1421e098bc96SEvan Quan default: 1422e098bc96SEvan Quan break; 1423e098bc96SEvan Quan } 1424e098bc96SEvan Quan 1425e098bc96SEvan Quan return NULL; 1426e098bc96SEvan Quan } 1427e098bc96SEvan Quan 1428e098bc96SEvan Quan static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 1429e098bc96SEvan Quan { 1430e098bc96SEvan Quan struct amdgpu_ps *ps; 1431e098bc96SEvan Quan enum amd_pm_state_type dpm_state; 1432e098bc96SEvan Quan int ret; 1433e098bc96SEvan Quan bool equal = false; 1434e098bc96SEvan Quan 1435e098bc96SEvan Quan /* if dpm init failed */ 1436e098bc96SEvan Quan if (!adev->pm.dpm_enabled) 1437e098bc96SEvan Quan return; 1438e098bc96SEvan Quan 1439e098bc96SEvan Quan if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 1440e098bc96SEvan Quan /* add other state override checks here */ 1441e098bc96SEvan Quan if ((!adev->pm.dpm.thermal_active) && 1442e098bc96SEvan Quan (!adev->pm.dpm.uvd_active)) 1443e098bc96SEvan Quan adev->pm.dpm.state = adev->pm.dpm.user_state; 1444e098bc96SEvan Quan } 1445e098bc96SEvan Quan dpm_state = adev->pm.dpm.state; 1446e098bc96SEvan Quan 1447e098bc96SEvan Quan ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 1448e098bc96SEvan Quan if (ps) 1449e098bc96SEvan Quan adev->pm.dpm.requested_ps = ps; 1450e098bc96SEvan Quan else 1451e098bc96SEvan Quan return; 1452e098bc96SEvan Quan 1453e098bc96SEvan Quan if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { 1454e098bc96SEvan Quan printk("switching from power state:\n"); 1455e098bc96SEvan Quan amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 1456e098bc96SEvan Quan printk("switching to power state:\n"); 1457e098bc96SEvan Quan amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 1458e098bc96SEvan Quan } 1459e098bc96SEvan Quan 1460e098bc96SEvan Quan /* update whether vce is active */ 1461e098bc96SEvan Quan ps->vce_active = adev->pm.dpm.vce_active; 1462e098bc96SEvan Quan if 
(adev->powerplay.pp_funcs->display_configuration_changed) 1463e098bc96SEvan Quan amdgpu_dpm_display_configuration_changed(adev); 1464e098bc96SEvan Quan 1465e098bc96SEvan Quan ret = amdgpu_dpm_pre_set_power_state(adev); 1466e098bc96SEvan Quan if (ret) 1467e098bc96SEvan Quan return; 1468e098bc96SEvan Quan 1469e098bc96SEvan Quan if (adev->powerplay.pp_funcs->check_state_equal) { 1470e098bc96SEvan Quan if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) 1471e098bc96SEvan Quan equal = false; 1472e098bc96SEvan Quan } 1473e098bc96SEvan Quan 1474e098bc96SEvan Quan if (equal) 1475e098bc96SEvan Quan return; 1476e098bc96SEvan Quan 147779c65f3fSEvan Quan if (adev->powerplay.pp_funcs->set_power_state) 147879c65f3fSEvan Quan adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); 147979c65f3fSEvan Quan 1480e098bc96SEvan Quan amdgpu_dpm_post_set_power_state(adev); 1481e098bc96SEvan Quan 1482e098bc96SEvan Quan adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 1483e098bc96SEvan Quan adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 1484e098bc96SEvan Quan 1485e098bc96SEvan Quan if (adev->powerplay.pp_funcs->force_performance_level) { 1486e098bc96SEvan Quan if (adev->pm.dpm.thermal_active) { 1487e098bc96SEvan Quan enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 1488e098bc96SEvan Quan /* force low perf level for thermal */ 1489e098bc96SEvan Quan amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 1490e098bc96SEvan Quan /* save the user's level */ 1491e098bc96SEvan Quan adev->pm.dpm.forced_level = level; 1492e098bc96SEvan Quan } else { 1493e098bc96SEvan Quan /* otherwise, user selected level */ 1494e098bc96SEvan Quan amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 1495e098bc96SEvan Quan } 1496e098bc96SEvan Quan } 1497e098bc96SEvan Quan } 1498e098bc96SEvan Quan 1499e098bc96SEvan Quan void amdgpu_pm_compute_clocks(struct 
amdgpu_device *adev) 1500e098bc96SEvan Quan { 1501e098bc96SEvan Quan int i = 0; 1502e098bc96SEvan Quan 1503e098bc96SEvan Quan if (!adev->pm.dpm_enabled) 1504e098bc96SEvan Quan return; 1505e098bc96SEvan Quan 1506e098bc96SEvan Quan if (adev->mode_info.num_crtc) 1507e098bc96SEvan Quan amdgpu_display_bandwidth_update(adev); 1508e098bc96SEvan Quan 1509e098bc96SEvan Quan for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1510e098bc96SEvan Quan struct amdgpu_ring *ring = adev->rings[i]; 1511e098bc96SEvan Quan if (ring && ring->sched.ready) 1512e098bc96SEvan Quan amdgpu_fence_wait_empty(ring); 1513e098bc96SEvan Quan } 1514e098bc96SEvan Quan 1515e098bc96SEvan Quan if (adev->powerplay.pp_funcs->dispatch_tasks) { 1516e098bc96SEvan Quan if (!amdgpu_device_has_dc_support(adev)) { 1517e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1518e098bc96SEvan Quan amdgpu_dpm_get_active_displays(adev); 1519e098bc96SEvan Quan adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 1520e098bc96SEvan Quan adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 1521e098bc96SEvan Quan adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 1522bc7d6c12SDarren Powell /* we have issues with mclk switching with 1523bc7d6c12SDarren Powell * refresh rates over 120 hz on the non-DC code. 
1524bc7d6c12SDarren Powell */ 1525e098bc96SEvan Quan if (adev->pm.pm_display_cfg.vrefresh > 120) 1526e098bc96SEvan Quan adev->pm.pm_display_cfg.min_vblank_time = 0; 1527e098bc96SEvan Quan if (adev->powerplay.pp_funcs->display_configuration_change) 1528e098bc96SEvan Quan adev->powerplay.pp_funcs->display_configuration_change( 1529e098bc96SEvan Quan adev->powerplay.pp_handle, 1530e098bc96SEvan Quan &adev->pm.pm_display_cfg); 1531e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1532e098bc96SEvan Quan } 1533e098bc96SEvan Quan amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 1534e098bc96SEvan Quan } else { 1535e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1536e098bc96SEvan Quan amdgpu_dpm_get_active_displays(adev); 1537e098bc96SEvan Quan amdgpu_dpm_change_power_state_locked(adev); 1538e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1539e098bc96SEvan Quan } 1540e098bc96SEvan Quan } 1541e098bc96SEvan Quan 1542e098bc96SEvan Quan void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 1543e098bc96SEvan Quan { 1544e098bc96SEvan Quan int ret = 0; 1545e098bc96SEvan Quan 1546e098bc96SEvan Quan if (adev->family == AMDGPU_FAMILY_SI) { 1547e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1548e098bc96SEvan Quan if (enable) { 1549e098bc96SEvan Quan adev->pm.dpm.uvd_active = true; 1550e098bc96SEvan Quan adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 1551e098bc96SEvan Quan } else { 1552e098bc96SEvan Quan adev->pm.dpm.uvd_active = false; 1553e098bc96SEvan Quan } 1554e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1555e098bc96SEvan Quan 1556e098bc96SEvan Quan amdgpu_pm_compute_clocks(adev); 1557e098bc96SEvan Quan } else { 1558e098bc96SEvan Quan ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 1559e098bc96SEvan Quan if (ret) 1560e098bc96SEvan Quan DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 1561e098bc96SEvan Quan enable ? 
"enable" : "disable", ret); 1562e098bc96SEvan Quan 1563e098bc96SEvan Quan /* enable/disable Low Memory PState for UVD (4k videos) */ 1564e098bc96SEvan Quan if (adev->asic_type == CHIP_STONEY && 1565e098bc96SEvan Quan adev->uvd.decode_image_width >= WIDTH_4K) { 1566e098bc96SEvan Quan struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1567e098bc96SEvan Quan 1568e098bc96SEvan Quan if (hwmgr && hwmgr->hwmgr_func && 1569e098bc96SEvan Quan hwmgr->hwmgr_func->update_nbdpm_pstate) 1570e098bc96SEvan Quan hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 1571e098bc96SEvan Quan !enable, 1572e098bc96SEvan Quan true); 1573e098bc96SEvan Quan } 1574e098bc96SEvan Quan } 1575e098bc96SEvan Quan } 1576e098bc96SEvan Quan 1577e098bc96SEvan Quan void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 1578e098bc96SEvan Quan { 1579e098bc96SEvan Quan int ret = 0; 1580e098bc96SEvan Quan 1581e098bc96SEvan Quan if (adev->family == AMDGPU_FAMILY_SI) { 1582e098bc96SEvan Quan mutex_lock(&adev->pm.mutex); 1583e098bc96SEvan Quan if (enable) { 1584e098bc96SEvan Quan adev->pm.dpm.vce_active = true; 1585e098bc96SEvan Quan /* XXX select vce level based on ring/task */ 1586e098bc96SEvan Quan adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 1587e098bc96SEvan Quan } else { 1588e098bc96SEvan Quan adev->pm.dpm.vce_active = false; 1589e098bc96SEvan Quan } 1590e098bc96SEvan Quan mutex_unlock(&adev->pm.mutex); 1591e098bc96SEvan Quan 1592e098bc96SEvan Quan amdgpu_pm_compute_clocks(adev); 1593e098bc96SEvan Quan } else { 1594e098bc96SEvan Quan ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 1595e098bc96SEvan Quan if (ret) 1596e098bc96SEvan Quan DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 1597e098bc96SEvan Quan enable ? 
"enable" : "disable", ret); 1598e098bc96SEvan Quan } 1599e098bc96SEvan Quan } 1600e098bc96SEvan Quan 1601e098bc96SEvan Quan void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 1602e098bc96SEvan Quan { 1603e098bc96SEvan Quan int i; 1604e098bc96SEvan Quan 1605e098bc96SEvan Quan if (adev->powerplay.pp_funcs->print_power_state == NULL) 1606e098bc96SEvan Quan return; 1607e098bc96SEvan Quan 1608e098bc96SEvan Quan for (i = 0; i < adev->pm.dpm.num_ps; i++) 1609e098bc96SEvan Quan amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 1610e098bc96SEvan Quan 1611e098bc96SEvan Quan } 1612e098bc96SEvan Quan 1613e098bc96SEvan Quan void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 1614e098bc96SEvan Quan { 1615e098bc96SEvan Quan int ret = 0; 1616e098bc96SEvan Quan 1617e098bc96SEvan Quan ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 1618e098bc96SEvan Quan if (ret) 1619e098bc96SEvan Quan DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 1620e098bc96SEvan Quan enable ? 
"enable" : "disable", ret); 1621e098bc96SEvan Quan } 1622e098bc96SEvan Quan 1623e098bc96SEvan Quan int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 1624e098bc96SEvan Quan { 1625e098bc96SEvan Quan int r; 1626e098bc96SEvan Quan 1627e098bc96SEvan Quan if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { 1628e098bc96SEvan Quan r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); 1629e098bc96SEvan Quan if (r) { 1630e098bc96SEvan Quan pr_err("smu firmware loading failed\n"); 1631e098bc96SEvan Quan return r; 1632e098bc96SEvan Quan } 16332e4b2f7bSEvan Quan 16342e4b2f7bSEvan Quan if (smu_version) 1635e098bc96SEvan Quan *smu_version = adev->pm.fw_version; 1636e098bc96SEvan Quan } 16372e4b2f7bSEvan Quan 1638e098bc96SEvan Quan return 0; 1639e098bc96SEvan Quan } 1640bc143d8bSEvan Quan 1641bc143d8bSEvan Quan int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 1642bc143d8bSEvan Quan { 1643bc143d8bSEvan Quan return smu_handle_passthrough_sbr(&adev->smu, enable); 1644bc143d8bSEvan Quan } 1645bc143d8bSEvan Quan 1646bc143d8bSEvan Quan int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 1647bc143d8bSEvan Quan { 1648bc143d8bSEvan Quan return smu_send_hbm_bad_pages_num(&adev->smu, size); 1649bc143d8bSEvan Quan } 1650bc143d8bSEvan Quan 1651bc143d8bSEvan Quan int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 1652bc143d8bSEvan Quan enum pp_clock_type type, 1653bc143d8bSEvan Quan uint32_t *min, 1654bc143d8bSEvan Quan uint32_t *max) 1655bc143d8bSEvan Quan { 1656bc143d8bSEvan Quan if (!is_support_sw_smu(adev)) 1657bc143d8bSEvan Quan return -EOPNOTSUPP; 1658bc143d8bSEvan Quan 1659bc143d8bSEvan Quan switch (type) { 1660bc143d8bSEvan Quan case PP_SCLK: 1661bc143d8bSEvan Quan return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max); 1662bc143d8bSEvan Quan default: 1663bc143d8bSEvan Quan return -EINVAL; 1664bc143d8bSEvan Quan } 1665bc143d8bSEvan Quan } 
1666bc143d8bSEvan Quan 1667bc143d8bSEvan Quan int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 1668bc143d8bSEvan Quan enum pp_clock_type type, 1669bc143d8bSEvan Quan uint32_t min, 1670bc143d8bSEvan Quan uint32_t max) 1671bc143d8bSEvan Quan { 1672bc143d8bSEvan Quan if (!is_support_sw_smu(adev)) 1673bc143d8bSEvan Quan return -EOPNOTSUPP; 1674bc143d8bSEvan Quan 1675bc143d8bSEvan Quan switch (type) { 1676bc143d8bSEvan Quan case PP_SCLK: 1677bc143d8bSEvan Quan return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max); 1678bc143d8bSEvan Quan default: 1679bc143d8bSEvan Quan return -EINVAL; 1680bc143d8bSEvan Quan } 1681bc143d8bSEvan Quan } 1682bc143d8bSEvan Quan 168313f5dbd6SEvan Quan int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 168413f5dbd6SEvan Quan { 168513f5dbd6SEvan Quan if (!is_support_sw_smu(adev)) 168613f5dbd6SEvan Quan return 0; 168713f5dbd6SEvan Quan 168813f5dbd6SEvan Quan return smu_write_watermarks_table(&adev->smu); 168913f5dbd6SEvan Quan } 169013f5dbd6SEvan Quan 1691bc143d8bSEvan Quan int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 1692bc143d8bSEvan Quan enum smu_event_type event, 1693bc143d8bSEvan Quan uint64_t event_arg) 1694bc143d8bSEvan Quan { 1695bc143d8bSEvan Quan if (!is_support_sw_smu(adev)) 1696bc143d8bSEvan Quan return -EOPNOTSUPP; 1697bc143d8bSEvan Quan 1698bc143d8bSEvan Quan return smu_wait_for_event(&adev->smu, event, event_arg); 1699bc143d8bSEvan Quan } 1700bc143d8bSEvan Quan 1701bc143d8bSEvan Quan int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 1702bc143d8bSEvan Quan { 1703bc143d8bSEvan Quan if (!is_support_sw_smu(adev)) 1704bc143d8bSEvan Quan return -EOPNOTSUPP; 1705bc143d8bSEvan Quan 1706bc143d8bSEvan Quan return smu_get_status_gfxoff(&adev->smu, value); 1707bc143d8bSEvan Quan } 1708bc143d8bSEvan Quan 1709bc143d8bSEvan Quan uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 1710bc143d8bSEvan Quan { 1711bc143d8bSEvan Quan return 
atomic64_read(&adev->smu.throttle_int_counter); 1712bc143d8bSEvan Quan } 1713bc143d8bSEvan Quan 1714bc143d8bSEvan Quan /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 1715bc143d8bSEvan Quan * @adev: amdgpu_device pointer 1716bc143d8bSEvan Quan * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 1717bc143d8bSEvan Quan * 1718bc143d8bSEvan Quan */ 1719bc143d8bSEvan Quan void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 1720bc143d8bSEvan Quan enum gfx_change_state state) 1721bc143d8bSEvan Quan { 1722bc143d8bSEvan Quan mutex_lock(&adev->pm.mutex); 1723bc143d8bSEvan Quan if (adev->powerplay.pp_funcs && 1724bc143d8bSEvan Quan adev->powerplay.pp_funcs->gfx_state_change_set) 1725bc143d8bSEvan Quan ((adev)->powerplay.pp_funcs->gfx_state_change_set( 1726bc143d8bSEvan Quan (adev)->powerplay.pp_handle, state)); 1727bc143d8bSEvan Quan mutex_unlock(&adev->pm.mutex); 1728bc143d8bSEvan Quan } 1729bc143d8bSEvan Quan 1730bc143d8bSEvan Quan int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 1731bc143d8bSEvan Quan void *umc_ecc) 1732bc143d8bSEvan Quan { 1733bc143d8bSEvan Quan if (!is_support_sw_smu(adev)) 1734bc143d8bSEvan Quan return -EOPNOTSUPP; 1735bc143d8bSEvan Quan 1736bc143d8bSEvan Quan return smu_get_ecc_info(&adev->smu, umc_ecc); 1737bc143d8bSEvan Quan } 173879c65f3fSEvan Quan 173979c65f3fSEvan Quan struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 174079c65f3fSEvan Quan uint32_t idx) 174179c65f3fSEvan Quan { 174279c65f3fSEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 174379c65f3fSEvan Quan 174479c65f3fSEvan Quan if (!pp_funcs->get_vce_clock_state) 174579c65f3fSEvan Quan return NULL; 174679c65f3fSEvan Quan 174779c65f3fSEvan Quan return pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 174879c65f3fSEvan Quan idx); 174979c65f3fSEvan Quan } 175079c65f3fSEvan Quan 175179c65f3fSEvan Quan void amdgpu_dpm_get_current_power_state(struct amdgpu_device 
*adev, 175279c65f3fSEvan Quan enum amd_pm_state_type *state) 175379c65f3fSEvan Quan { 175479c65f3fSEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 175579c65f3fSEvan Quan 175679c65f3fSEvan Quan if (!pp_funcs->get_current_power_state) { 175779c65f3fSEvan Quan *state = adev->pm.dpm.user_state; 175879c65f3fSEvan Quan return; 175979c65f3fSEvan Quan } 176079c65f3fSEvan Quan 176179c65f3fSEvan Quan *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 176279c65f3fSEvan Quan if (*state < POWER_STATE_TYPE_DEFAULT || 176379c65f3fSEvan Quan *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 176479c65f3fSEvan Quan *state = adev->pm.dpm.user_state; 176579c65f3fSEvan Quan } 176679c65f3fSEvan Quan 176779c65f3fSEvan Quan void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 176879c65f3fSEvan Quan enum amd_pm_state_type state) 176979c65f3fSEvan Quan { 177079c65f3fSEvan Quan adev->pm.dpm.user_state = state; 177179c65f3fSEvan Quan 177279c65f3fSEvan Quan if (is_support_sw_smu(adev)) 177379c65f3fSEvan Quan return; 177479c65f3fSEvan Quan 177579c65f3fSEvan Quan if (amdgpu_dpm_dispatch_task(adev, 177679c65f3fSEvan Quan AMD_PP_TASK_ENABLE_USER_STATE, 177779c65f3fSEvan Quan &state) == -EOPNOTSUPP) 177879c65f3fSEvan Quan amdgpu_pm_compute_clocks(adev); 177979c65f3fSEvan Quan } 178079c65f3fSEvan Quan 178179c65f3fSEvan Quan enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 178279c65f3fSEvan Quan { 178379c65f3fSEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 178479c65f3fSEvan Quan enum amd_dpm_forced_level level; 178579c65f3fSEvan Quan 178679c65f3fSEvan Quan if (pp_funcs->get_performance_level) 178779c65f3fSEvan Quan level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 178879c65f3fSEvan Quan else 178979c65f3fSEvan Quan level = adev->pm.dpm.forced_level; 179079c65f3fSEvan Quan 179179c65f3fSEvan Quan return level; 179279c65f3fSEvan Quan } 179379c65f3fSEvan Quan 179479c65f3fSEvan 
/*
 * Thin wrappers routing amdgpu DPM requests to the powerplay backend
 * (adev->powerplay.pp_funcs called with adev->powerplay.pp_handle).
 *
 * NOTE(review): the "callback not implemented" return value is deliberately
 * inconsistent across these wrappers (0, -EOPNOTSUPP, -EINVAL, -ENODATA,
 * -ENOSYS) — callers appear to depend on the specific value, so do not
 * normalize them without auditing every call site.
 */

/*
 * Force a DPM performance level (e.g. auto/low/high/manual).
 * Refused with -EINVAL while a thermal event is active, or if the
 * backend rejects the level.  On success the level is cached in
 * adev->pm.dpm.forced_level.  A missing backend callback is a no-op.
 */
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs->force_performance_level) {
		/* No manual overrides while thermal throttling is active. */
		if (adev->pm.dpm.thermal_active)
			return -EINVAL;

		if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
						      level))
			return -EINVAL;

		/* Cache the accepted level for later queries. */
		adev->pm.dpm.forced_level = level;
	}

	return 0;
}

/* Query the number (and list) of available power states. */
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	return pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, states);
}

/*
 * Dispatch a powerplay task (e.g. readjust power state) to the backend.
 * user_state may carry a requested power state; semantics depend on
 * task_id — defined by the backend's dispatch_tasks implementation.
 */
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	return pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, task_id, user_state);
}

/*
 * Fetch a pointer to the backend's powerplay table.  Returns the table
 * size via the backend on success; 0 (no table) when unsupported.
 */
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_pp_table)
		return 0;

	return pp_funcs->get_pp_table(adev->powerplay.pp_handle, table);
}

/* Apply fine-grain clock/voltage edits ('input' holds 'size' longs). */
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	return pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
						type,
						input,
						size);
}

/* Edit an OverDriveN DPM table entry ('input' holds 'size' longs). */
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	return pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					    type,
					    input,
					    size);
}

/* Print the levels of the given clock domain into 'buf'; 0 if unsupported. */
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->print_clock_levels)
		return 0;

	return pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					    type,
					    buf);
}

/* Enable/disable powerplay features via a 64-bit feature mask. */
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	return pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					      ppfeature_masks);
}

/* Report the current powerplay feature status into 'buf'. */
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	return pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					      buf);
}

/* Restrict a clock domain to the levels selected by bitmask 'mask'. */
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->force_clock_level)
		return 0;

	return pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					   type,
					   mask);
}

/* Read the sclk overdrive value; 0 when the backend has no support. */
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_sclk_od)
		return 0;

	return pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
}

/*
 * Set the sclk overdrive value, then ask the backend to readjust the
 * power state.  No-op on SW-SMU parts.  If the backend cannot dispatch
 * tasks, fall back to the legacy path: reset to the boot power state
 * and recompute clocks locally.
 */
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

/* Read the mclk overdrive value; 0 when the backend has no support. */
int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_mclk_od)
		return 0;

	return pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
}

/* Set the mclk overdrive value; same dispatch/fallback flow as sclk_od. */
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

/* Print the available power-profile modes into 'buf'. */
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	return pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
						buf);
}

/* Select/customize a power-profile mode ('input' holds 'size' longs). */
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	return pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
						input,
						size);
}

/*
 * Fetch a pointer to the backend's GPU metrics table via 'table'.
 * Returns the table size on success (backend-defined); 0 if unsupported.
 */
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	return pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, table);
}

/* Read the fan control mode (manual/auto) into *fan_mode. */
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	*fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle);

	return 0;
}

/* Set the fan speed as a PWM duty value. */
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EINVAL;

	return pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed);
}

/* Read the fan speed as a PWM duty value into *speed. */
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EINVAL;

	return pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, speed);
}

/* Read the fan speed in RPM into *speed. */
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EINVAL;

	return pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, speed);
}

/* Set the fan speed in RPM. */
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EINVAL;

	return pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, speed);
}

/* Set the fan control mode (manual/auto); backend result is ignored. */
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, mode);

	return 0;
}

/*
 * Query a power limit (current/min/max etc. per pp_limit_level) for the
 * given power type.  -ENODATA when the backend cannot report limits.
 */
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	return pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					 limit,
					 pp_limit_level,
					 power_type);
}

/* Program a new power limit. */
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	return pp_funcs->set_power_limit(adev->powerplay.pp_handle, limit);
}

/*
 * Whether CPU-core clock (cclk) DPM is supported — only possible on
 * SW-SMU parts.  NOTE(review): boolean predicate declared as int.
 */
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return false;

	return is_support_cclk_dpm(adev);
}

/* Emit the current performance level into a debugfs seq_file. */
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);

	return 0;
}

/* Expose the SMU private buffer (address and size) for debugging. */
int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	return pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						 addr,
						 size);
}

/*
 * Whether overdrive (manual clock/voltage tuning) is supported: SW-SMU
 * parts with OD enabled or APUs, else the legacy hwmgr OD flag.
 * NOTE(review): pp_handle is only a pp_hwmgr on the non-SW-SMU path;
 * hwmgr is dereferenced only in that branch.  Boolean declared as int.
 */
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
	    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		return true;

	return false;
}

/* Upload a replacement powerplay table ('size' bytes at 'buf'). */
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	return pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				      buf,
				      size);
}

/* Number of CPU cores as reported by the SMU (APU-relevant). */
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	return adev->smu.cpu_core_num;
}

/* Create the SMU Trace Buffer debugfs entries (SW-SMU parts only). */
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

/* Notify the backend of a new display configuration. */
int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->display_configuration_change)
		return 0;

	return pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						      input);
}

/* Query the clock levels for a given clock type. */
int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	return pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					   type,
					   clocks);
}

/* Query the clocks used to validate display modes. */
int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	return pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							    clocks);
}

/* Query clock levels plus latency info for a clock type. */
int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	return pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
							type,
							clocks);
}

/* Query clock levels plus voltage info for a clock type. */
int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	return pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
							type,
							clocks);
}

/*
 * Program display watermarks for clock ranges.  'clock_ranges' is an
 * opaque, backend-defined structure — presumably a dm_pp watermark set;
 * verify against the display-manager caller.
 */
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	return pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							  clock_ranges);
}

/* Request a specific display clock/voltage from the backend. */
int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	return pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						       clock);
}

/* Query the currently active clock values. */
int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	return pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					    clocks);
}

/* Tell the SMU to enable the power well; silently skipped if unsupported. */
void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
}

/* Report the number of active displays to the backend. */
int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	return pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						  count);
}

/* Set the minimum DCEF clock for deep-sleep states. */
int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	return pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						    clock);
}

/* Set a hard minimum DCEF clock frequency; no-op if unsupported. */
void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
}

/* Set a hard minimum fabric clock frequency; no-op if unsupported. */
void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
}

/* Allow or forbid memory clock switching while displays are active. */
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	return pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							     disable_memory_clock_switch);
}

/* Query the maximum sustainable clocks for the display core. */
int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	return pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							  max_clocks);
}

/*
 * Query the available memory-clock (uclk) DPM states.
 * NOTE(review): declared to return enum pp_smu_status yet returns
 * -EOPNOTSUPP and the backend's int result — looks like the return type
 * is effectively a plain int error code; confirm callers agree.
 */
enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	return pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					     clock_values_in_khz,
					     num_states);
}
239413f5dbd6SEvan Quan int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 239513f5dbd6SEvan Quan struct dpm_clocks *clock_table) 239613f5dbd6SEvan Quan { 239713f5dbd6SEvan Quan const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 239813f5dbd6SEvan Quan 239913f5dbd6SEvan Quan if (!pp_funcs->get_dpm_clock_table) 240013f5dbd6SEvan Quan return -EOPNOTSUPP; 240113f5dbd6SEvan Quan 240213f5dbd6SEvan Quan return pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 240313f5dbd6SEvan Quan clock_table); 240413f5dbd6SEvan Quan } 2405