1 /* 2 * Copyright 2017 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Rafał Miłecki <zajec5@gmail.com> 23 * Alex Deucher <alexdeucher@gmail.com> 24 */ 25 26 #include <drm/drm_debugfs.h> 27 28 #include "amdgpu.h" 29 #include "amdgpu_drv.h" 30 #include "amdgpu_pm.h" 31 #include "amdgpu_dpm.h" 32 #include "amdgpu_smu.h" 33 #include "atom.h" 34 #include <linux/pci.h> 35 #include <linux/hwmon.h> 36 #include <linux/hwmon-sysfs.h> 37 #include <linux/nospec.h> 38 #include <linux/pm_runtime.h> 39 #include "hwmgr.h" 40 41 static const struct cg_flag_name clocks[] = { 42 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, 43 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 44 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 45 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 46 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 47 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 48 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 49 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 50 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 51 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 52 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 53 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 54 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 55 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 56 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 57 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 58 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 59 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 60 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 61 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 62 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 63 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 64 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 65 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 66 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 67 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"}, 68 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"}, 69 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"}, 70 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"}, 71 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"}, 72 73 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, 74 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, 75 {0, NULL}, 76 }; 77 78 static const struct hwmon_temp_label { 79 enum PP_HWMON_TEMP channel; 80 const char *label; 81 } temp_label[] = { 82 {PP_TEMP_EDGE, "edge"}, 83 {PP_TEMP_JUNCTION, "junction"}, 84 {PP_TEMP_MEM, "mem"}, 85 }; 86 87 /** 88 * DOC: power_dpm_state 89 * 90 * The power_dpm_state file is a legacy interface and is only provided for 91 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting 92 * certain power related parameters. The file power_dpm_state is used for this. 93 * It accepts the following arguments: 94 * 95 * - battery 96 * 97 * - balanced 98 * 99 * - performance 100 * 101 * battery 102 * 103 * On older GPUs, the vbios provided a special power state for battery 104 * operation. Selecting battery switched to this state. This is no 105 * longer provided on newer GPUs so the option does nothing in that case. 106 * 107 * balanced 108 * 109 * On older GPUs, the vbios provided a special power state for balanced 110 * operation. Selecting balanced switched to this state. This is no 111 * longer provided on newer GPUs so the option does nothing in that case. 112 * 113 * performance 114 * 115 * On older GPUs, the vbios provided a special power state for performance 116 * operation. Selecting performance switched to this state. This is no 117 * longer provided on newer GPUs so the option does nothing in that case. 118 * 119 */ 120 121 static ssize_t amdgpu_get_power_dpm_state(struct device *dev, 122 struct device_attribute *attr, 123 char *buf) 124 { 125 struct drm_device *ddev = dev_get_drvdata(dev); 126 struct amdgpu_device *adev = drm_to_adev(ddev); 127 enum amd_pm_state_type pm; 128 int ret; 129 130 if (amdgpu_in_reset(adev)) 131 return -EPERM; 132 133 ret = pm_runtime_get_sync(ddev->dev); 134 if (ret < 0) { 135 pm_runtime_put_autosuspend(ddev->dev); 136 return ret; 137 } 138 139 if (is_support_sw_smu(adev)) { 140 if (adev->smu.ppt_funcs->get_current_power_state) 141 pm = smu_get_current_power_state(&adev->smu); 142 else 143 pm = adev->pm.dpm.user_state; 144 } else if (adev->powerplay.pp_funcs->get_current_power_state) { 145 pm = amdgpu_dpm_get_current_power_state(adev); 146 } else { 147 pm = adev->pm.dpm.user_state; 148 } 149 150 pm_runtime_mark_last_busy(ddev->dev); 151 pm_runtime_put_autosuspend(ddev->dev); 152 153 return snprintf(buf, PAGE_SIZE, "%s\n", 154 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 155 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 156 } 157 158 static ssize_t amdgpu_set_power_dpm_state(struct device *dev, 159 struct device_attribute *attr, 160 const char *buf, 161 size_t count) 162 { 163 struct drm_device *ddev = dev_get_drvdata(dev); 164 struct amdgpu_device *adev = drm_to_adev(ddev); 165 enum amd_pm_state_type state; 166 int ret; 167 168 if (amdgpu_in_reset(adev)) 169 return -EPERM; 170 171 if (strncmp("battery", buf, strlen("battery")) == 0) 172 state = POWER_STATE_TYPE_BATTERY; 173 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 174 state = POWER_STATE_TYPE_BALANCED; 175 else if (strncmp("performance", buf, strlen("performance")) == 0) 176 state = POWER_STATE_TYPE_PERFORMANCE; 177 else 178 return -EINVAL; 179 180 ret = pm_runtime_get_sync(ddev->dev); 181 if (ret < 0) { 182 pm_runtime_put_autosuspend(ddev->dev); 183 return ret; 184 } 185 186 if (is_support_sw_smu(adev)) { 187 mutex_lock(&adev->pm.mutex); 188 adev->pm.dpm.user_state = state; 189 mutex_unlock(&adev->pm.mutex); 190 } else if (adev->powerplay.pp_funcs->dispatch_tasks) { 191 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 192 } else { 193 mutex_lock(&adev->pm.mutex); 194 adev->pm.dpm.user_state = state; 195 mutex_unlock(&adev->pm.mutex); 196 197 amdgpu_pm_compute_clocks(adev); 198 } 199 pm_runtime_mark_last_busy(ddev->dev); 200 pm_runtime_put_autosuspend(ddev->dev); 201 202 return count; 203 } 204 205 206 /** 207 * DOC: power_dpm_force_performance_level 208 * 209 * The amdgpu driver provides a sysfs API for adjusting certain power 210 * related parameters. The file power_dpm_force_performance_level is 211 * used for this. It accepts the following arguments: 212 * 213 * - auto 214 * 215 * - low 216 * 217 * - high 218 * 219 * - manual 220 * 221 * - profile_standard 222 * 223 * - profile_min_sclk 224 * 225 * - profile_min_mclk 226 * 227 * - profile_peak 228 * 229 * auto 230 * 231 * When auto is selected, the driver will attempt to dynamically select 232 * the optimal power profile for current conditions in the driver. 233 * 234 * low 235 * 236 * When low is selected, the clocks are forced to the lowest power state. 237 * 238 * high 239 * 240 * When high is selected, the clocks are forced to the highest power state. 241 * 242 * manual 243 * 244 * When manual is selected, the user can manually adjust which power states 245 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 246 * and pp_dpm_pcie files and adjust the power state transition heuristics 247 * via the pp_power_profile_mode sysfs file. 248 * 249 * profile_standard 250 * profile_min_sclk 251 * profile_min_mclk 252 * profile_peak 253 * 254 * When the profiling modes are selected, clock and power gating are 255 * disabled and the clocks are set for different profiling cases. This 256 * mode is recommended for profiling specific work loads where you do 257 * not want clock or power gating for clock fluctuation to interfere 258 * with your results. profile_standard sets the clocks to a fixed clock 259 * level which varies from asic to asic. profile_min_sclk forces the sclk 260 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 261 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 262 * 263 */ 264 265 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, 266 struct device_attribute *attr, 267 char *buf) 268 { 269 struct drm_device *ddev = dev_get_drvdata(dev); 270 struct amdgpu_device *adev = drm_to_adev(ddev); 271 enum amd_dpm_forced_level level = 0xff; 272 int ret; 273 274 if (amdgpu_in_reset(adev)) 275 return -EPERM; 276 277 ret = pm_runtime_get_sync(ddev->dev); 278 if (ret < 0) { 279 pm_runtime_put_autosuspend(ddev->dev); 280 return ret; 281 } 282 283 if (is_support_sw_smu(adev)) 284 level = smu_get_performance_level(&adev->smu); 285 else if (adev->powerplay.pp_funcs->get_performance_level) 286 level = amdgpu_dpm_get_performance_level(adev); 287 else 288 level = adev->pm.dpm.forced_level; 289 290 pm_runtime_mark_last_busy(ddev->dev); 291 pm_runtime_put_autosuspend(ddev->dev); 292 293 return snprintf(buf, PAGE_SIZE, "%s\n", 294 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 295 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 296 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 297 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 298 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 299 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 300 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 301 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 302 "unknown"); 303 } 304 305 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, 306 struct device_attribute *attr, 307 const char *buf, 308 size_t count) 309 { 310 struct drm_device *ddev = dev_get_drvdata(dev); 311 struct amdgpu_device *adev = drm_to_adev(ddev); 312 enum amd_dpm_forced_level level; 313 enum amd_dpm_forced_level current_level = 0xff; 314 int ret = 0; 315 316 if (amdgpu_in_reset(adev)) 317 return -EPERM; 318 319 if (strncmp("low", buf, strlen("low")) == 0) { 320 level = AMD_DPM_FORCED_LEVEL_LOW; 321 } else if (strncmp("high", buf, strlen("high")) == 0) { 322 level = AMD_DPM_FORCED_LEVEL_HIGH; 323 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 324 level = AMD_DPM_FORCED_LEVEL_AUTO; 325 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 326 level = AMD_DPM_FORCED_LEVEL_MANUAL; 327 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 328 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 329 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 330 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 331 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 332 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 333 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 334 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 335 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 336 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 337 } else { 338 return -EINVAL; 339 } 340 341 ret = pm_runtime_get_sync(ddev->dev); 342 if (ret < 0) { 343 pm_runtime_put_autosuspend(ddev->dev); 344 return ret; 345 } 346 347 if (is_support_sw_smu(adev)) 348 current_level = smu_get_performance_level(&adev->smu); 349 else if (adev->powerplay.pp_funcs->get_performance_level) 350 current_level = amdgpu_dpm_get_performance_level(adev); 351 352 if (current_level == level) { 353 pm_runtime_mark_last_busy(ddev->dev); 354 pm_runtime_put_autosuspend(ddev->dev); 355 return count; 356 } 357 358 if (adev->asic_type == CHIP_RAVEN) { 359 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 360 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL) 361 amdgpu_gfx_off_ctrl(adev, false); 362 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL) 363 amdgpu_gfx_off_ctrl(adev, true); 364 } 365 } 366 367 /* profile_exit setting is valid only when current mode is in profile mode */ 368 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 369 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 370 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 371 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && 372 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { 373 pr_err("Currently not in any profile mode!\n"); 374 pm_runtime_mark_last_busy(ddev->dev); 375 pm_runtime_put_autosuspend(ddev->dev); 376 return -EINVAL; 377 } 378 379 if (is_support_sw_smu(adev)) { 380 ret = smu_force_performance_level(&adev->smu, level); 381 if (ret) { 382 pm_runtime_mark_last_busy(ddev->dev); 383 pm_runtime_put_autosuspend(ddev->dev); 384 return -EINVAL; 385 } 386 } else if (adev->powerplay.pp_funcs->force_performance_level) { 387 mutex_lock(&adev->pm.mutex); 388 if (adev->pm.dpm.thermal_active) { 389 mutex_unlock(&adev->pm.mutex); 390 pm_runtime_mark_last_busy(ddev->dev); 391 pm_runtime_put_autosuspend(ddev->dev); 392 return -EINVAL; 393 } 394 ret = amdgpu_dpm_force_performance_level(adev, level); 395 if (ret) { 396 mutex_unlock(&adev->pm.mutex); 397 pm_runtime_mark_last_busy(ddev->dev); 398 pm_runtime_put_autosuspend(ddev->dev); 399 return -EINVAL; 400 } else { 401 adev->pm.dpm.forced_level = level; 402 } 403 mutex_unlock(&adev->pm.mutex); 404 } 405 pm_runtime_mark_last_busy(ddev->dev); 406 pm_runtime_put_autosuspend(ddev->dev); 407 408 return count; 409 } 410 411 static ssize_t amdgpu_get_pp_num_states(struct device *dev, 412 struct device_attribute *attr, 413 char *buf) 414 { 415 struct drm_device *ddev = dev_get_drvdata(dev); 416 struct amdgpu_device *adev = drm_to_adev(ddev); 417 struct pp_states_info data; 418 int i, buf_len, ret; 419 420 if (amdgpu_in_reset(adev)) 421 return -EPERM; 422 423 ret = pm_runtime_get_sync(ddev->dev); 424 if (ret < 0) { 425 pm_runtime_put_autosuspend(ddev->dev); 426 return ret; 427 } 428 429 if (is_support_sw_smu(adev)) { 430 ret = smu_get_power_num_states(&adev->smu, &data); 431 if (ret) 432 return ret; 433 } else if (adev->powerplay.pp_funcs->get_pp_num_states) { 434 amdgpu_dpm_get_pp_num_states(adev, &data); 435 } else { 436 memset(&data, 0, sizeof(data)); 437 } 438 439 pm_runtime_mark_last_busy(ddev->dev); 440 pm_runtime_put_autosuspend(ddev->dev); 441 442 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 443 for (i = 0; i < data.nums; i++) 444 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 445 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 446 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 447 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 448 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 449 450 return buf_len; 451 } 452 453 static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 454 struct device_attribute *attr, 455 char *buf) 456 { 457 struct drm_device *ddev = dev_get_drvdata(dev); 458 struct amdgpu_device *adev = drm_to_adev(ddev); 459 struct pp_states_info data; 460 struct smu_context *smu = &adev->smu; 461 enum amd_pm_state_type pm = 0; 462 int i = 0, ret = 0; 463 464 if (amdgpu_in_reset(adev)) 465 return -EPERM; 466 467 ret = pm_runtime_get_sync(ddev->dev); 468 if (ret < 0) { 469 pm_runtime_put_autosuspend(ddev->dev); 470 return ret; 471 } 472 473 if (is_support_sw_smu(adev)) { 474 pm = smu_get_current_power_state(smu); 475 ret = smu_get_power_num_states(smu, &data); 476 if (ret) 477 return ret; 478 } else if (adev->powerplay.pp_funcs->get_current_power_state 479 && adev->powerplay.pp_funcs->get_pp_num_states) { 480 pm = amdgpu_dpm_get_current_power_state(adev); 481 amdgpu_dpm_get_pp_num_states(adev, &data); 482 } 483 484 pm_runtime_mark_last_busy(ddev->dev); 485 pm_runtime_put_autosuspend(ddev->dev); 486 487 for (i = 0; i < data.nums; i++) { 488 if (pm == data.states[i]) 489 break; 490 } 491 492 if (i == data.nums) 493 i = -EINVAL; 494 495 return snprintf(buf, PAGE_SIZE, "%d\n", i); 496 } 497 498 static ssize_t amdgpu_get_pp_force_state(struct device *dev, 499 struct device_attribute *attr, 500 char *buf) 501 { 502 struct drm_device *ddev = dev_get_drvdata(dev); 503 struct amdgpu_device *adev = drm_to_adev(ddev); 504 505 if (amdgpu_in_reset(adev)) 506 return -EPERM; 507 508 if (adev->pp_force_state_enabled) 509 return amdgpu_get_pp_cur_state(dev, attr, buf); 510 else 511 return snprintf(buf, PAGE_SIZE, "\n"); 512 } 513 514 static ssize_t amdgpu_set_pp_force_state(struct device *dev, 515 struct device_attribute *attr, 516 const char *buf, 517 size_t count) 518 { 519 struct drm_device *ddev = dev_get_drvdata(dev); 520 struct amdgpu_device *adev = drm_to_adev(ddev); 521 enum amd_pm_state_type state = 0; 522 unsigned long idx; 523 int ret; 524 525 if (amdgpu_in_reset(adev)) 526 return -EPERM; 527 528 if (strlen(buf) == 1) 529 adev->pp_force_state_enabled = false; 530 else if (is_support_sw_smu(adev)) 531 adev->pp_force_state_enabled = false; 532 else if (adev->powerplay.pp_funcs->dispatch_tasks && 533 adev->powerplay.pp_funcs->get_pp_num_states) { 534 struct pp_states_info data; 535 536 ret = kstrtoul(buf, 0, &idx); 537 if (ret || idx >= ARRAY_SIZE(data.states)) 538 return -EINVAL; 539 540 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 541 542 amdgpu_dpm_get_pp_num_states(adev, &data); 543 state = data.states[idx]; 544 545 ret = pm_runtime_get_sync(ddev->dev); 546 if (ret < 0) { 547 pm_runtime_put_autosuspend(ddev->dev); 548 return ret; 549 } 550 551 /* only set user selected power states */ 552 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 553 state != POWER_STATE_TYPE_DEFAULT) { 554 amdgpu_dpm_dispatch_task(adev, 555 AMD_PP_TASK_ENABLE_USER_STATE, &state); 556 adev->pp_force_state_enabled = true; 557 } 558 pm_runtime_mark_last_busy(ddev->dev); 559 pm_runtime_put_autosuspend(ddev->dev); 560 } 561 562 return count; 563 } 564 565 /** 566 * DOC: pp_table 567 * 568 * The amdgpu driver provides a sysfs API for uploading new powerplay 569 * tables. The file pp_table is used for this. Reading the file 570 * will dump the current power play table. Writing to the file 571 * will attempt to upload a new powerplay table and re-initialize 572 * powerplay using that new table. 573 * 574 */ 575 576 static ssize_t amdgpu_get_pp_table(struct device *dev, 577 struct device_attribute *attr, 578 char *buf) 579 { 580 struct drm_device *ddev = dev_get_drvdata(dev); 581 struct amdgpu_device *adev = drm_to_adev(ddev); 582 char *table = NULL; 583 int size, ret; 584 585 if (amdgpu_in_reset(adev)) 586 return -EPERM; 587 588 ret = pm_runtime_get_sync(ddev->dev); 589 if (ret < 0) { 590 pm_runtime_put_autosuspend(ddev->dev); 591 return ret; 592 } 593 594 if (is_support_sw_smu(adev)) { 595 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 596 pm_runtime_mark_last_busy(ddev->dev); 597 pm_runtime_put_autosuspend(ddev->dev); 598 if (size < 0) 599 return size; 600 } else if (adev->powerplay.pp_funcs->get_pp_table) { 601 size = amdgpu_dpm_get_pp_table(adev, &table); 602 pm_runtime_mark_last_busy(ddev->dev); 603 pm_runtime_put_autosuspend(ddev->dev); 604 if (size < 0) 605 return size; 606 } else { 607 pm_runtime_mark_last_busy(ddev->dev); 608 pm_runtime_put_autosuspend(ddev->dev); 609 return 0; 610 } 611 612 if (size >= PAGE_SIZE) 613 size = PAGE_SIZE - 1; 614 615 memcpy(buf, table, size); 616 617 return size; 618 } 619 620 static ssize_t amdgpu_set_pp_table(struct device *dev, 621 struct device_attribute *attr, 622 const char *buf, 623 size_t count) 624 { 625 struct drm_device *ddev = dev_get_drvdata(dev); 626 struct amdgpu_device *adev = drm_to_adev(ddev); 627 int ret = 0; 628 629 if (amdgpu_in_reset(adev)) 630 return -EPERM; 631 632 ret = pm_runtime_get_sync(ddev->dev); 633 if (ret < 0) { 634 pm_runtime_put_autosuspend(ddev->dev); 635 return ret; 636 } 637 638 if (is_support_sw_smu(adev)) { 639 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); 640 if (ret) { 641 pm_runtime_mark_last_busy(ddev->dev); 642 pm_runtime_put_autosuspend(ddev->dev); 643 return ret; 644 } 645 } else if (adev->powerplay.pp_funcs->set_pp_table) 646 amdgpu_dpm_set_pp_table(adev, buf, count); 647 648 pm_runtime_mark_last_busy(ddev->dev); 649 pm_runtime_put_autosuspend(ddev->dev); 650 651 return count; 652 } 653 654 /** 655 * DOC: pp_od_clk_voltage 656 * 657 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 658 * in each power level within a power state. The pp_od_clk_voltage is used for 659 * this. 660 * 661 * Note that the actual memory controller clock rate are exposed, not 662 * the effective memory clock of the DRAMs. To translate it, use the 663 * following formula: 664 * 665 * Clock conversion (Mhz): 666 * 667 * HBM: effective_memory_clock = memory_controller_clock * 1 668 * 669 * G5: effective_memory_clock = memory_controller_clock * 1 670 * 671 * G6: effective_memory_clock = memory_controller_clock * 2 672 * 673 * DRAM data rate (MT/s): 674 * 675 * HBM: effective_memory_clock * 2 = data_rate 676 * 677 * G5: effective_memory_clock * 4 = data_rate 678 * 679 * G6: effective_memory_clock * 8 = data_rate 680 * 681 * Bandwidth (MB/s): 682 * 683 * data_rate * vram_bit_width / 8 = memory_bandwidth 684 * 685 * Some examples: 686 * 687 * G5 on RX460: 688 * 689 * memory_controller_clock = 1750 Mhz 690 * 691 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz 692 * 693 * data rate = 1750 * 4 = 7000 MT/s 694 * 695 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s 696 * 697 * G6 on RX5700: 698 * 699 * memory_controller_clock = 875 Mhz 700 * 701 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz 702 * 703 * data rate = 1750 * 8 = 14000 MT/s 704 * 705 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s 706 * 707 * < For Vega10 and previous ASICs > 708 * 709 * Reading the file will display: 710 * 711 * - a list of engine clock levels and voltages labeled OD_SCLK 712 * 713 * - a list of memory clock levels and voltages labeled OD_MCLK 714 * 715 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 716 * 717 * To manually adjust these settings, first select manual using 718 * power_dpm_force_performance_level. Enter a new value for each 719 * level by writing a string that contains "s/m level clock voltage" to 720 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 721 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 722 * 810 mV. When you have edited all of the states as needed, write 723 * "c" (commit) to the file to commit your changes. If you want to reset to the 724 * default power levels, write "r" (reset) to the file to reset them. 725 * 726 * 727 * < For Vega20 and newer ASICs > 728 * 729 * Reading the file will display: 730 * 731 * - minimum and maximum engine clock labeled OD_SCLK 732 * 733 * - minimum(not available for Vega20 and Navi1x) and maximum memory 734 * clock labeled OD_MCLK 735 * 736 * - three <frequency, voltage> points labeled OD_VDDC_CURVE. 737 * They can be used to calibrate the sclk voltage curve. 738 * 739 * - voltage offset(in mV) applied on target voltage calculation. 740 * This is available for Sienna Cichlid, Navy Flounder and Dimgrey 741 * Cavefish. For these ASICs, the target voltage calculation can be 742 * illustrated by "voltage = voltage calculated from v/f curve + 743 * overdrive vddgfx offset" 744 * 745 * - a list of valid ranges for sclk, mclk, and voltage curve points 746 * labeled OD_RANGE 747 * 748 * To manually adjust these settings: 749 * 750 * - First select manual using power_dpm_force_performance_level 751 * 752 * - For clock frequency setting, enter a new value by writing a 753 * string that contains "s/m index clock" to the file. The index 754 * should be 0 if to set minimum clock. And 1 if to set maximum 755 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. 756 * "m 1 800" will update maximum mclk to be 800Mhz. 757 * 758 * For sclk voltage curve, enter the new values by writing a 759 * string that contains "vc point clock voltage" to the file. The 760 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will 761 * update point1 with clock set as 300Mhz and voltage as 762 * 600mV. "vc 2 1000 1000" will update point3 with clock set 763 * as 1000Mhz and voltage 1000mV. 764 * 765 * To update the voltage offset applied for gfxclk/voltage calculation, 766 * enter the new value by writing a string that contains "vo offset". 767 * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish. 768 * And the offset can be a positive or negative value. 769 * 770 * - When you have edited all of the states as needed, write "c" (commit) 771 * to the file to commit your changes 772 * 773 * - If you want to reset to the default power levels, write "r" (reset) 774 * to the file to reset them 775 * 776 */ 777 778 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 779 struct device_attribute *attr, 780 const char *buf, 781 size_t count) 782 { 783 struct drm_device *ddev = dev_get_drvdata(dev); 784 struct amdgpu_device *adev = drm_to_adev(ddev); 785 int ret; 786 uint32_t parameter_size = 0; 787 long parameter[64]; 788 char buf_cpy[128]; 789 char *tmp_str; 790 char *sub_str; 791 const char delimiter[3] = {' ', '\n', '\0'}; 792 uint32_t type; 793 794 if (amdgpu_in_reset(adev)) 795 return -EPERM; 796 797 if (count > 127) 798 return -EINVAL; 799 800 if (*buf == 's') 801 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 802 else if (*buf == 'm') 803 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 804 else if(*buf == 'r') 805 type = PP_OD_RESTORE_DEFAULT_TABLE; 806 else if (*buf == 'c') 807 type = PP_OD_COMMIT_DPM_TABLE; 808 else if (!strncmp(buf, "vc", 2)) 809 type = PP_OD_EDIT_VDDC_CURVE; 810 else if (!strncmp(buf, "vo", 2)) 811 type = PP_OD_EDIT_VDDGFX_OFFSET; 812 else 813 return -EINVAL; 814 815 memcpy(buf_cpy, buf, count+1); 816 817 tmp_str = buf_cpy; 818 819 if ((type == PP_OD_EDIT_VDDC_CURVE) || 820 (type == PP_OD_EDIT_VDDGFX_OFFSET)) 821 tmp_str++; 822 while (isspace(*++tmp_str)); 823 824 while (tmp_str[0]) { 825 sub_str = strsep(&tmp_str, delimiter); 826 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 827 if (ret) 828 return -EINVAL; 829 parameter_size++; 830 831 while (isspace(*tmp_str)) 832 tmp_str++; 833 } 834 835 ret = pm_runtime_get_sync(ddev->dev); 836 if (ret < 0) { 837 pm_runtime_put_autosuspend(ddev->dev); 838 return ret; 839 } 840 841 if (is_support_sw_smu(adev)) { 842 ret = smu_od_edit_dpm_table(&adev->smu, type, 843 parameter, parameter_size); 844 845 if (ret) { 846 pm_runtime_mark_last_busy(ddev->dev); 847 pm_runtime_put_autosuspend(ddev->dev); 848 return -EINVAL; 849 } 850 } else { 851 852 if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) { 853 ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type, 854 parameter, 855 parameter_size); 856 if (ret) { 857 pm_runtime_mark_last_busy(ddev->dev); 858 pm_runtime_put_autosuspend(ddev->dev); 859 return -EINVAL; 860 } 861 } 862 863 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { 864 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 865 parameter, parameter_size); 866 if (ret) { 867 pm_runtime_mark_last_busy(ddev->dev); 868 pm_runtime_put_autosuspend(ddev->dev); 869 return -EINVAL; 870 } 871 } 872 873 if (type == PP_OD_COMMIT_DPM_TABLE) { 874 if (adev->powerplay.pp_funcs->dispatch_tasks) { 875 amdgpu_dpm_dispatch_task(adev, 876 AMD_PP_TASK_READJUST_POWER_STATE, 877 NULL); 878 pm_runtime_mark_last_busy(ddev->dev); 879 pm_runtime_put_autosuspend(ddev->dev); 880 return count; 881 } else { 882 pm_runtime_mark_last_busy(ddev->dev); 883 pm_runtime_put_autosuspend(ddev->dev); 884 return -EINVAL; 885 } 886 } 887 } 888 pm_runtime_mark_last_busy(ddev->dev); 889 pm_runtime_put_autosuspend(ddev->dev); 890 891 return count; 892 } 893 894 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 895 struct device_attribute *attr, 896 char *buf) 897 { 898 struct drm_device *ddev = dev_get_drvdata(dev); 899 struct amdgpu_device *adev = drm_to_adev(ddev); 900 ssize_t size; 901 int ret; 902 903 if (amdgpu_in_reset(adev)) 904 return -EPERM; 905 906 ret = pm_runtime_get_sync(ddev->dev); 907 if (ret < 0) { 908 pm_runtime_put_autosuspend(ddev->dev); 909 return ret; 910 } 911 912 if (is_support_sw_smu(adev)) { 913 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 914 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); 915 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); 916 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size); 917 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); 918 } else if (adev->powerplay.pp_funcs->print_clock_levels) { 919 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 920 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 921 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); 922 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 923 } else { 924 size = snprintf(buf, PAGE_SIZE, "\n"); 925 } 926 pm_runtime_mark_last_busy(ddev->dev); 927 pm_runtime_put_autosuspend(ddev->dev); 928 929 return size; 930 } 931 932 /** 933 * DOC: pp_features 934 * 935 * The amdgpu driver provides a sysfs API for adjusting what powerplay 936 * features to be enabled. The file pp_features is used for this. And 937 * this is only available for Vega10 and later dGPUs. 938 * 939 * Reading back the file will show you the followings: 940 * - Current ppfeature masks 941 * - List of the all supported powerplay features with their naming, 942 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). 943 * 944 * To manually enable or disable a specific feature, just set or clear 945 * the corresponding bit from original ppfeature masks and input the 946 * new ppfeature masks. 947 */ 948 static ssize_t amdgpu_set_pp_features(struct device *dev, 949 struct device_attribute *attr, 950 const char *buf, 951 size_t count) 952 { 953 struct drm_device *ddev = dev_get_drvdata(dev); 954 struct amdgpu_device *adev = drm_to_adev(ddev); 955 uint64_t featuremask; 956 int ret; 957 958 if (amdgpu_in_reset(adev)) 959 return -EPERM; 960 961 ret = kstrtou64(buf, 0, &featuremask); 962 if (ret) 963 return -EINVAL; 964 965 ret = pm_runtime_get_sync(ddev->dev); 966 if (ret < 0) { 967 pm_runtime_put_autosuspend(ddev->dev); 968 return ret; 969 } 970 971 if (is_support_sw_smu(adev)) { 972 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); 973 if (ret) { 974 pm_runtime_mark_last_busy(ddev->dev); 975 pm_runtime_put_autosuspend(ddev->dev); 976 return -EINVAL; 977 } 978 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { 979 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 980 if (ret) { 981 pm_runtime_mark_last_busy(ddev->dev); 982 pm_runtime_put_autosuspend(ddev->dev); 983 return -EINVAL; 984 } 985 } 986 pm_runtime_mark_last_busy(ddev->dev); 987 pm_runtime_put_autosuspend(ddev->dev); 988 989 return count; 990 } 991 992 static ssize_t amdgpu_get_pp_features(struct device *dev, 993 struct device_attribute *attr, 994 char *buf) 995 { 996 struct drm_device *ddev = dev_get_drvdata(dev); 997 struct amdgpu_device *adev = drm_to_adev(ddev); 998 ssize_t size; 999 int ret; 1000 1001 if (amdgpu_in_reset(adev)) 1002 return -EPERM; 1003 1004 ret = pm_runtime_get_sync(ddev->dev); 1005 if (ret < 0) { 1006 pm_runtime_put_autosuspend(ddev->dev); 1007 return ret; 1008 } 1009 1010 if (is_support_sw_smu(adev)) 1011 size = smu_sys_get_pp_feature_mask(&adev->smu, buf); 1012 else if (adev->powerplay.pp_funcs->get_ppfeature_status) 1013 size = amdgpu_dpm_get_ppfeature_status(adev, buf); 1014 else 1015 size = snprintf(buf, PAGE_SIZE, "\n"); 1016 1017 pm_runtime_mark_last_busy(ddev->dev); 1018 pm_runtime_put_autosuspend(ddev->dev); 1019 1020 return size; 1021 } 1022 1023 /** 1024 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie 1025 * 1026 * The amdgpu driver provides a sysfs API for adjusting what power levels 1027 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 1028 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for 1029 * this. 1030 * 1031 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for 1032 * Vega10 and later ASICs. 1033 * pp_dpm_fclk interface is only available for Vega20 and later ASICs. 1034 * 1035 * Reading back the files will show you the available power levels within 1036 * the power state and the clock information for those levels. 1037 * 1038 * To manually adjust these states, first select manual using 1039 * power_dpm_force_performance_level. 1040 * Secondly, enter a new value for each level by inputing a string that 1041 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 1042 * E.g., 1043 * 1044 * .. code-block:: bash 1045 * 1046 * echo "4 5 6" > pp_dpm_sclk 1047 * 1048 * will enable sclk levels 4, 5, and 6. 1049 * 1050 * NOTE: change to the dcefclk max dpm level is not supported now 1051 */ 1052 1053 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 1054 struct device_attribute *attr, 1055 char *buf) 1056 { 1057 struct drm_device *ddev = dev_get_drvdata(dev); 1058 struct amdgpu_device *adev = drm_to_adev(ddev); 1059 ssize_t size; 1060 int ret; 1061 1062 if (amdgpu_in_reset(adev)) 1063 return -EPERM; 1064 1065 ret = pm_runtime_get_sync(ddev->dev); 1066 if (ret < 0) { 1067 pm_runtime_put_autosuspend(ddev->dev); 1068 return ret; 1069 } 1070 1071 if (is_support_sw_smu(adev)) 1072 size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 1073 else if (adev->powerplay.pp_funcs->print_clock_levels) 1074 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 1075 else 1076 size = snprintf(buf, PAGE_SIZE, "\n"); 1077 1078 pm_runtime_mark_last_busy(ddev->dev); 1079 pm_runtime_put_autosuspend(ddev->dev); 1080 1081 return size; 1082 } 1083 1084 /* 1085 * Worst case: 32 bits individually specified, in octal at 12 characters 1086 * per line (+1 for \n). 1087 */ 1088 #define AMDGPU_MASK_BUF_MAX (32 * 13) 1089 1090 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 1091 { 1092 int ret; 1093 long level; 1094 char *sub_str = NULL; 1095 char *tmp; 1096 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 1097 const char delimiter[3] = {' ', '\n', '\0'}; 1098 size_t bytes; 1099 1100 *mask = 0; 1101 1102 bytes = min(count, sizeof(buf_cpy) - 1); 1103 memcpy(buf_cpy, buf, bytes); 1104 buf_cpy[bytes] = '\0'; 1105 tmp = buf_cpy; 1106 while (tmp[0]) { 1107 sub_str = strsep(&tmp, delimiter); 1108 if (strlen(sub_str)) { 1109 ret = kstrtol(sub_str, 0, &level); 1110 if (ret) 1111 return -EINVAL; 1112 *mask |= 1 << level; 1113 } else 1114 break; 1115 } 1116 1117 return 0; 1118 } 1119 1120 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 1121 struct device_attribute *attr, 1122 const char *buf, 1123 size_t count) 1124 { 1125 struct drm_device *ddev = dev_get_drvdata(dev); 1126 struct amdgpu_device *adev = drm_to_adev(ddev); 1127 int ret; 1128 uint32_t mask = 0; 1129 1130 if (amdgpu_in_reset(adev)) 1131 return -EPERM; 1132 1133 ret = amdgpu_read_mask(buf, count, &mask); 1134 if (ret) 1135 return ret; 1136 1137 ret = pm_runtime_get_sync(ddev->dev); 1138 if (ret < 0) { 1139 pm_runtime_put_autosuspend(ddev->dev); 1140 return ret; 1141 } 1142 1143 if (is_support_sw_smu(adev)) 1144 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); 1145 else if (adev->powerplay.pp_funcs->force_clock_level) 1146 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 1147 1148 pm_runtime_mark_last_busy(ddev->dev); 1149 pm_runtime_put_autosuspend(ddev->dev); 1150 1151 if (ret) 1152 return -EINVAL; 1153 1154 return count; 1155 } 1156 1157 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 1158 struct device_attribute *attr, 1159 char *buf) 1160 { 1161 struct drm_device *ddev = dev_get_drvdata(dev); 1162 struct amdgpu_device *adev = drm_to_adev(ddev); 1163 ssize_t size; 1164 int ret; 1165 1166 if (amdgpu_in_reset(adev)) 1167 return -EPERM; 1168 1169 ret = pm_runtime_get_sync(ddev->dev); 1170 if (ret < 0) { 1171 pm_runtime_put_autosuspend(ddev->dev); 1172 return ret; 1173 } 1174 1175 if (is_support_sw_smu(adev)) 1176 size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 1177 else if (adev->powerplay.pp_funcs->print_clock_levels) 1178 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 1179 else 1180 size = snprintf(buf, PAGE_SIZE, "\n"); 1181 1182 pm_runtime_mark_last_busy(ddev->dev); 1183 pm_runtime_put_autosuspend(ddev->dev); 1184 1185 return size; 1186 } 1187 1188 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 1189 struct device_attribute *attr, 1190 const char *buf, 1191 size_t count) 1192 { 1193 struct drm_device *ddev = dev_get_drvdata(dev); 1194 struct amdgpu_device *adev = drm_to_adev(ddev); 1195 uint32_t mask = 0; 1196 int ret; 1197 1198 if (amdgpu_in_reset(adev)) 1199 return -EPERM; 1200 1201 ret = amdgpu_read_mask(buf, count, &mask); 1202 if (ret) 1203 return ret; 1204 1205 ret = pm_runtime_get_sync(ddev->dev); 1206 if (ret < 0) { 1207 pm_runtime_put_autosuspend(ddev->dev); 1208 return ret; 1209 } 1210 1211 if (is_support_sw_smu(adev)) 1212 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); 1213 else if (adev->powerplay.pp_funcs->force_clock_level) 1214 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 1215 1216 pm_runtime_mark_last_busy(ddev->dev); 1217 pm_runtime_put_autosuspend(ddev->dev); 1218 1219 if (ret) 1220 return -EINVAL; 1221 1222 return count; 1223 } 1224 1225 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 1226 struct device_attribute *attr, 1227 char *buf) 1228 { 1229 struct drm_device *ddev = dev_get_drvdata(dev); 1230 struct amdgpu_device *adev = drm_to_adev(ddev); 1231 ssize_t size; 1232 int ret; 1233 1234 if (amdgpu_in_reset(adev)) 1235 return -EPERM; 1236 1237 ret = pm_runtime_get_sync(ddev->dev); 1238 if (ret < 0) { 1239 pm_runtime_put_autosuspend(ddev->dev); 1240 return ret; 1241 } 1242 1243 if (is_support_sw_smu(adev)) 1244 size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 1245 else if (adev->powerplay.pp_funcs->print_clock_levels) 1246 size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 1247 else 1248 size = snprintf(buf, PAGE_SIZE, "\n"); 1249 1250 pm_runtime_mark_last_busy(ddev->dev); 1251 pm_runtime_put_autosuspend(ddev->dev); 1252 1253 return size; 1254 } 1255 1256 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 1257 struct device_attribute *attr, 1258 const char *buf, 1259 size_t count) 1260 { 1261 struct drm_device *ddev = dev_get_drvdata(dev); 1262 struct amdgpu_device *adev = drm_to_adev(ddev); 1263 int ret; 1264 uint32_t mask = 0; 1265 1266 if (amdgpu_in_reset(adev)) 1267 return -EPERM; 1268 1269 ret = amdgpu_read_mask(buf, count, &mask); 1270 if (ret) 1271 return ret; 1272 1273 ret = pm_runtime_get_sync(ddev->dev); 1274 if (ret < 0) { 1275 pm_runtime_put_autosuspend(ddev->dev); 1276 return ret; 1277 } 1278 1279 if (is_support_sw_smu(adev)) 1280 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); 1281 else if (adev->powerplay.pp_funcs->force_clock_level) 1282 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); 1283 else 1284 ret = 0; 1285 1286 pm_runtime_mark_last_busy(ddev->dev); 1287 pm_runtime_put_autosuspend(ddev->dev); 1288 1289 if (ret) 1290 return -EINVAL; 1291 1292 return count; 1293 } 1294 1295 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1296 struct device_attribute *attr, 1297 char *buf) 1298 { 1299 struct drm_device *ddev = dev_get_drvdata(dev); 1300 struct amdgpu_device *adev = drm_to_adev(ddev); 1301 ssize_t size; 1302 int ret; 1303 1304 if (amdgpu_in_reset(adev)) 1305 return -EPERM; 1306 1307 ret = pm_runtime_get_sync(ddev->dev); 1308 if (ret < 0) { 1309 pm_runtime_put_autosuspend(ddev->dev); 1310 return ret; 1311 } 1312 1313 if (is_support_sw_smu(adev)) 1314 size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1315 else if (adev->powerplay.pp_funcs->print_clock_levels) 1316 size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1317 else 1318 size = snprintf(buf, PAGE_SIZE, "\n"); 1319 1320 pm_runtime_mark_last_busy(ddev->dev); 1321 pm_runtime_put_autosuspend(ddev->dev); 1322 1323 return size; 1324 } 1325 1326 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1327 struct device_attribute *attr, 1328 const char *buf, 1329 size_t count) 1330 { 1331 struct drm_device *ddev = dev_get_drvdata(dev); 1332 struct amdgpu_device *adev = drm_to_adev(ddev); 1333 int ret; 1334 uint32_t mask = 0; 1335 1336 if (amdgpu_in_reset(adev)) 1337 return -EPERM; 1338 1339 ret = amdgpu_read_mask(buf, count, &mask); 1340 if (ret) 1341 return ret; 1342 1343 ret = pm_runtime_get_sync(ddev->dev); 1344 if (ret < 0) { 1345 pm_runtime_put_autosuspend(ddev->dev); 1346 return ret; 1347 } 1348 1349 if (is_support_sw_smu(adev)) 1350 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); 1351 else if (adev->powerplay.pp_funcs->force_clock_level) 1352 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); 1353 else 1354 ret = 0; 1355 1356 pm_runtime_mark_last_busy(ddev->dev); 1357 pm_runtime_put_autosuspend(ddev->dev); 1358 1359 if (ret) 1360 return -EINVAL; 1361 1362 return count; 1363 } 1364 1365 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev, 1366 struct device_attribute *attr, 1367 char *buf) 1368 { 1369 struct drm_device *ddev = dev_get_drvdata(dev); 1370 struct amdgpu_device *adev = drm_to_adev(ddev); 1371 ssize_t size; 1372 int ret; 1373 1374 if (amdgpu_in_reset(adev)) 1375 return -EPERM; 1376 1377 ret = pm_runtime_get_sync(ddev->dev); 1378 if (ret < 0) { 1379 pm_runtime_put_autosuspend(ddev->dev); 1380 return ret; 1381 } 1382 1383 if (is_support_sw_smu(adev)) 1384 size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf); 1385 else 1386 size = snprintf(buf, PAGE_SIZE, "\n"); 1387 1388 pm_runtime_mark_last_busy(ddev->dev); 1389 pm_runtime_put_autosuspend(ddev->dev); 1390 1391 return size; 1392 } 1393 1394 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev, 1395 struct device_attribute *attr, 1396 const char *buf, 1397 size_t count) 1398 { 1399 struct drm_device *ddev = dev_get_drvdata(dev); 1400 struct amdgpu_device *adev = drm_to_adev(ddev); 1401 int ret; 1402 uint32_t mask = 0; 1403 1404 if (amdgpu_in_reset(adev)) 1405 return -EPERM; 1406 1407 ret = amdgpu_read_mask(buf, count, &mask); 1408 if (ret) 1409 return ret; 1410 1411 ret = pm_runtime_get_sync(ddev->dev); 1412 if (ret < 0) { 1413 pm_runtime_put_autosuspend(ddev->dev); 1414 return ret; 1415 } 1416 1417 if (is_support_sw_smu(adev)) 1418 ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask); 1419 else 1420 ret = 0; 1421 1422 pm_runtime_mark_last_busy(ddev->dev); 1423 pm_runtime_put_autosuspend(ddev->dev); 1424 1425 if (ret) 1426 return -EINVAL; 1427 1428 return count; 1429 } 1430 1431 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev, 1432 struct device_attribute *attr, 1433 char *buf) 1434 { 1435 struct drm_device *ddev = dev_get_drvdata(dev); 1436 struct amdgpu_device *adev = drm_to_adev(ddev); 1437 ssize_t size; 1438 int ret; 1439 1440 if (amdgpu_in_reset(adev)) 1441 return -EPERM; 1442 1443 ret = pm_runtime_get_sync(ddev->dev); 1444 if (ret < 0) { 1445 pm_runtime_put_autosuspend(ddev->dev); 1446 return ret; 1447 } 1448 1449 if (is_support_sw_smu(adev)) 1450 size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf); 1451 else 1452 size = snprintf(buf, PAGE_SIZE, "\n"); 1453 1454 pm_runtime_mark_last_busy(ddev->dev); 1455 pm_runtime_put_autosuspend(ddev->dev); 1456 1457 return size; 1458 } 1459 1460 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev, 1461 struct device_attribute *attr, 1462 const char *buf, 1463 size_t count) 1464 { 1465 struct drm_device *ddev = dev_get_drvdata(dev); 1466 struct amdgpu_device *adev = drm_to_adev(ddev); 1467 int ret; 1468 uint32_t mask = 0; 1469 1470 if (amdgpu_in_reset(adev)) 1471 return -EPERM; 1472 1473 ret = amdgpu_read_mask(buf, count, &mask); 1474 if (ret) 1475 return ret; 1476 1477 ret = pm_runtime_get_sync(ddev->dev); 1478 if (ret < 0) { 1479 pm_runtime_put_autosuspend(ddev->dev); 1480 return ret; 1481 } 1482 1483 if (is_support_sw_smu(adev)) 1484 ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask); 1485 else 1486 ret = 0; 1487 1488 pm_runtime_mark_last_busy(ddev->dev); 1489 pm_runtime_put_autosuspend(ddev->dev); 1490 1491 if (ret) 1492 return -EINVAL; 1493 1494 return count; 1495 } 1496 1497 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1498 struct device_attribute *attr, 1499 char *buf) 1500 { 1501 struct drm_device *ddev = dev_get_drvdata(dev); 1502 struct amdgpu_device *adev = drm_to_adev(ddev); 1503 ssize_t size; 1504 int ret; 1505 1506 if (amdgpu_in_reset(adev)) 1507 return -EPERM; 1508 1509 ret = pm_runtime_get_sync(ddev->dev); 1510 if (ret < 0) { 1511 pm_runtime_put_autosuspend(ddev->dev); 1512 return ret; 1513 } 1514 1515 if (is_support_sw_smu(adev)) 1516 size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1517 else if (adev->powerplay.pp_funcs->print_clock_levels) 1518 size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1519 else 1520 size = snprintf(buf, PAGE_SIZE, "\n"); 1521 1522 pm_runtime_mark_last_busy(ddev->dev); 1523 pm_runtime_put_autosuspend(ddev->dev); 1524 1525 return size; 1526 } 1527 1528 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1529 struct device_attribute *attr, 1530 const char *buf, 1531 size_t count) 1532 { 1533 struct drm_device *ddev = dev_get_drvdata(dev); 1534 struct amdgpu_device *adev = drm_to_adev(ddev); 1535 int ret; 1536 uint32_t mask = 0; 1537 1538 if (amdgpu_in_reset(adev)) 1539 return -EPERM; 1540 1541 ret = amdgpu_read_mask(buf, count, &mask); 1542 if (ret) 1543 return ret; 1544 1545 ret = pm_runtime_get_sync(ddev->dev); 1546 if (ret < 0) { 1547 pm_runtime_put_autosuspend(ddev->dev); 1548 return ret; 1549 } 1550 1551 if (is_support_sw_smu(adev)) 1552 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); 1553 else if (adev->powerplay.pp_funcs->force_clock_level) 1554 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); 1555 else 1556 ret = 0; 1557 1558 pm_runtime_mark_last_busy(ddev->dev); 1559 pm_runtime_put_autosuspend(ddev->dev); 1560 1561 if (ret) 1562 return -EINVAL; 1563 1564 return count; 1565 } 1566 1567 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1568 struct device_attribute *attr, 1569 char *buf) 1570 { 1571 struct drm_device *ddev = dev_get_drvdata(dev); 1572 struct amdgpu_device *adev = drm_to_adev(ddev); 1573 ssize_t size; 1574 int ret; 1575 1576 if (amdgpu_in_reset(adev)) 1577 return -EPERM; 1578 1579 ret = pm_runtime_get_sync(ddev->dev); 1580 if (ret < 0) { 1581 pm_runtime_put_autosuspend(ddev->dev); 1582 return ret; 1583 } 1584 1585 if (is_support_sw_smu(adev)) 1586 size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1587 else if (adev->powerplay.pp_funcs->print_clock_levels) 1588 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1589 else 1590 size = snprintf(buf, PAGE_SIZE, "\n"); 1591 1592 pm_runtime_mark_last_busy(ddev->dev); 1593 pm_runtime_put_autosuspend(ddev->dev); 1594 1595 return size; 1596 } 1597 1598 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1599 struct device_attribute *attr, 1600 const char *buf, 1601 size_t count) 1602 { 1603 struct drm_device *ddev = dev_get_drvdata(dev); 1604 struct amdgpu_device *adev = drm_to_adev(ddev); 1605 int ret; 1606 uint32_t mask = 0; 1607 1608 if (amdgpu_in_reset(adev)) 1609 return -EPERM; 1610 1611 ret = amdgpu_read_mask(buf, count, &mask); 1612 if (ret) 1613 return ret; 1614 1615 ret = pm_runtime_get_sync(ddev->dev); 1616 if (ret < 0) { 1617 pm_runtime_put_autosuspend(ddev->dev); 1618 return ret; 1619 } 1620 1621 if (is_support_sw_smu(adev)) 1622 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); 1623 else if (adev->powerplay.pp_funcs->force_clock_level) 1624 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 1625 else 1626 ret = 0; 1627 1628 pm_runtime_mark_last_busy(ddev->dev); 1629 pm_runtime_put_autosuspend(ddev->dev); 1630 1631 if (ret) 1632 return -EINVAL; 1633 1634 return count; 1635 } 1636 1637 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1638 struct device_attribute *attr, 1639 char *buf) 1640 { 1641 struct drm_device *ddev = dev_get_drvdata(dev); 1642 struct amdgpu_device *adev = drm_to_adev(ddev); 1643 uint32_t value = 0; 1644 int ret; 1645 1646 if (amdgpu_in_reset(adev)) 1647 return -EPERM; 1648 1649 ret = pm_runtime_get_sync(ddev->dev); 1650 if (ret < 0) { 1651 pm_runtime_put_autosuspend(ddev->dev); 1652 return ret; 1653 } 1654 1655 if (is_support_sw_smu(adev)) 1656 value = 0; 1657 else if (adev->powerplay.pp_funcs->get_sclk_od) 1658 value = amdgpu_dpm_get_sclk_od(adev); 1659 1660 pm_runtime_mark_last_busy(ddev->dev); 1661 pm_runtime_put_autosuspend(ddev->dev); 1662 1663 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1664 } 1665 1666 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1667 struct device_attribute *attr, 1668 const char *buf, 1669 size_t count) 1670 { 1671 struct drm_device *ddev = dev_get_drvdata(dev); 1672 struct amdgpu_device *adev = drm_to_adev(ddev); 1673 int ret; 1674 long int value; 1675 1676 if (amdgpu_in_reset(adev)) 1677 return -EPERM; 1678 1679 ret = kstrtol(buf, 0, &value); 1680 1681 if (ret) 1682 return -EINVAL; 1683 1684 ret = pm_runtime_get_sync(ddev->dev); 1685 if (ret < 0) { 1686 pm_runtime_put_autosuspend(ddev->dev); 1687 return ret; 1688 } 1689 1690 if (is_support_sw_smu(adev)) { 1691 value = 0; 1692 } else { 1693 if (adev->powerplay.pp_funcs->set_sclk_od) 1694 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1695 1696 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1697 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1698 } else { 1699 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1700 amdgpu_pm_compute_clocks(adev); 1701 } 1702 } 1703 1704 pm_runtime_mark_last_busy(ddev->dev); 1705 pm_runtime_put_autosuspend(ddev->dev); 1706 1707 return count; 1708 } 1709 1710 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 1711 struct device_attribute *attr, 1712 char *buf) 1713 { 1714 struct drm_device *ddev = dev_get_drvdata(dev); 1715 struct amdgpu_device *adev = drm_to_adev(ddev); 1716 uint32_t value = 0; 1717 int ret; 1718 1719 if (amdgpu_in_reset(adev)) 1720 return -EPERM; 1721 1722 ret = pm_runtime_get_sync(ddev->dev); 1723 if (ret < 0) { 1724 pm_runtime_put_autosuspend(ddev->dev); 1725 return ret; 1726 } 1727 1728 if (is_support_sw_smu(adev)) 1729 value = 0; 1730 else if (adev->powerplay.pp_funcs->get_mclk_od) 1731 value = amdgpu_dpm_get_mclk_od(adev); 1732 1733 pm_runtime_mark_last_busy(ddev->dev); 1734 pm_runtime_put_autosuspend(ddev->dev); 1735 1736 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1737 } 1738 1739 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 1740 struct device_attribute *attr, 1741 const char *buf, 1742 size_t count) 1743 { 1744 struct drm_device *ddev = dev_get_drvdata(dev); 1745 struct amdgpu_device *adev = drm_to_adev(ddev); 1746 int ret; 1747 long int value; 1748 1749 if (amdgpu_in_reset(adev)) 1750 return -EPERM; 1751 1752 ret = kstrtol(buf, 0, &value); 1753 1754 if (ret) 1755 return -EINVAL; 1756 1757 ret = pm_runtime_get_sync(ddev->dev); 1758 if (ret < 0) { 1759 pm_runtime_put_autosuspend(ddev->dev); 1760 return ret; 1761 } 1762 1763 if (is_support_sw_smu(adev)) { 1764 value = 0; 1765 } else { 1766 if (adev->powerplay.pp_funcs->set_mclk_od) 1767 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 1768 1769 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1770 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1771 } else { 1772 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1773 amdgpu_pm_compute_clocks(adev); 1774 } 1775 } 1776 1777 pm_runtime_mark_last_busy(ddev->dev); 1778 pm_runtime_put_autosuspend(ddev->dev); 1779 1780 return count; 1781 } 1782 1783 /** 1784 * DOC: pp_power_profile_mode 1785 * 1786 * The amdgpu driver provides a sysfs API for adjusting the heuristics 1787 * related to switching between power levels in a power state. The file 1788 * pp_power_profile_mode is used for this. 1789 * 1790 * Reading this file outputs a list of all of the predefined power profiles 1791 * and the relevant heuristics settings for that profile. 1792 * 1793 * To select a profile or create a custom profile, first select manual using 1794 * power_dpm_force_performance_level. Writing the number of a predefined 1795 * profile to pp_power_profile_mode will enable those heuristics. To 1796 * create a custom set of heuristics, write a string of numbers to the file 1797 * starting with the number of the custom profile along with a setting 1798 * for each heuristic parameter. Due to differences across asic families 1799 * the heuristic parameters vary from family to family. 1800 * 1801 */ 1802 1803 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 1804 struct device_attribute *attr, 1805 char *buf) 1806 { 1807 struct drm_device *ddev = dev_get_drvdata(dev); 1808 struct amdgpu_device *adev = drm_to_adev(ddev); 1809 ssize_t size; 1810 int ret; 1811 1812 if (amdgpu_in_reset(adev)) 1813 return -EPERM; 1814 1815 ret = pm_runtime_get_sync(ddev->dev); 1816 if (ret < 0) { 1817 pm_runtime_put_autosuspend(ddev->dev); 1818 return ret; 1819 } 1820 1821 if (is_support_sw_smu(adev)) 1822 size = smu_get_power_profile_mode(&adev->smu, buf); 1823 else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1824 size = amdgpu_dpm_get_power_profile_mode(adev, buf); 1825 else 1826 size = snprintf(buf, PAGE_SIZE, "\n"); 1827 1828 pm_runtime_mark_last_busy(ddev->dev); 1829 pm_runtime_put_autosuspend(ddev->dev); 1830 1831 return size; 1832 } 1833 1834 1835 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 1836 struct device_attribute *attr, 1837 const char *buf, 1838 size_t count) 1839 { 1840 int ret; 1841 struct drm_device *ddev = dev_get_drvdata(dev); 1842 struct amdgpu_device *adev = drm_to_adev(ddev); 1843 uint32_t parameter_size = 0; 1844 long parameter[64]; 1845 char *sub_str, buf_cpy[128]; 1846 char *tmp_str; 1847 uint32_t i = 0; 1848 char tmp[2]; 1849 long int profile_mode = 0; 1850 const char delimiter[3] = {' ', '\n', '\0'}; 1851 1852 if (amdgpu_in_reset(adev)) 1853 return -EPERM; 1854 1855 tmp[0] = *(buf); 1856 tmp[1] = '\0'; 1857 ret = kstrtol(tmp, 0, &profile_mode); 1858 if (ret) 1859 return -EINVAL; 1860 1861 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1862 if (count < 2 || count > 127) 1863 return -EINVAL; 1864 while (isspace(*++buf)) 1865 i++; 1866 memcpy(buf_cpy, buf, count-i); 1867 tmp_str = buf_cpy; 1868 while (tmp_str[0]) { 1869 sub_str = strsep(&tmp_str, delimiter); 1870 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 1871 if (ret) 1872 return -EINVAL; 1873 parameter_size++; 1874 while (isspace(*tmp_str)) 1875 tmp_str++; 1876 } 1877 } 1878 parameter[parameter_size] = profile_mode; 1879 1880 ret = pm_runtime_get_sync(ddev->dev); 1881 if (ret < 0) { 1882 pm_runtime_put_autosuspend(ddev->dev); 1883 return ret; 1884 } 1885 1886 if (is_support_sw_smu(adev)) 1887 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); 1888 else if (adev->powerplay.pp_funcs->set_power_profile_mode) 1889 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1890 1891 pm_runtime_mark_last_busy(ddev->dev); 1892 pm_runtime_put_autosuspend(ddev->dev); 1893 1894 if (!ret) 1895 return count; 1896 1897 return -EINVAL; 1898 } 1899 1900 /** 1901 * DOC: gpu_busy_percent 1902 * 1903 * The amdgpu driver provides a sysfs API for reading how busy the GPU 1904 * is as a percentage. The file gpu_busy_percent is used for this. 1905 * The SMU firmware computes a percentage of load based on the 1906 * aggregate activity level in the IP cores. 1907 */ 1908 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, 1909 struct device_attribute *attr, 1910 char *buf) 1911 { 1912 struct drm_device *ddev = dev_get_drvdata(dev); 1913 struct amdgpu_device *adev = drm_to_adev(ddev); 1914 int r, value, size = sizeof(value); 1915 1916 if (amdgpu_in_reset(adev)) 1917 return -EPERM; 1918 1919 r = pm_runtime_get_sync(ddev->dev); 1920 if (r < 0) { 1921 pm_runtime_put_autosuspend(ddev->dev); 1922 return r; 1923 } 1924 1925 /* read the IP busy sensor */ 1926 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1927 (void *)&value, &size); 1928 1929 pm_runtime_mark_last_busy(ddev->dev); 1930 pm_runtime_put_autosuspend(ddev->dev); 1931 1932 if (r) 1933 return r; 1934 1935 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1936 } 1937 1938 /** 1939 * DOC: mem_busy_percent 1940 * 1941 * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1942 * is as a percentage. The file mem_busy_percent is used for this. 1943 * The SMU firmware computes a percentage of load based on the 1944 * aggregate activity level in the IP cores. 1945 */ 1946 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, 1947 struct device_attribute *attr, 1948 char *buf) 1949 { 1950 struct drm_device *ddev = dev_get_drvdata(dev); 1951 struct amdgpu_device *adev = drm_to_adev(ddev); 1952 int r, value, size = sizeof(value); 1953 1954 if (amdgpu_in_reset(adev)) 1955 return -EPERM; 1956 1957 r = pm_runtime_get_sync(ddev->dev); 1958 if (r < 0) { 1959 pm_runtime_put_autosuspend(ddev->dev); 1960 return r; 1961 } 1962 1963 /* read the IP busy sensor */ 1964 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, 1965 (void *)&value, &size); 1966 1967 pm_runtime_mark_last_busy(ddev->dev); 1968 pm_runtime_put_autosuspend(ddev->dev); 1969 1970 if (r) 1971 return r; 1972 1973 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1974 } 1975 1976 /** 1977 * DOC: pcie_bw 1978 * 1979 * The amdgpu driver provides a sysfs API for estimating how much data 1980 * has been received and sent by the GPU in the last second through PCIe. 1981 * The file pcie_bw is used for this. 1982 * The Perf counters count the number of received and sent messages and return 1983 * those values, as well as the maximum payload size of a PCIe packet (mps). 1984 * Note that it is not possible to easily and quickly obtain the size of each 1985 * packet transmitted, so we output the max payload size (mps) to allow for 1986 * quick estimation of the PCIe bandwidth usage 1987 */ 1988 static ssize_t amdgpu_get_pcie_bw(struct device *dev, 1989 struct device_attribute *attr, 1990 char *buf) 1991 { 1992 struct drm_device *ddev = dev_get_drvdata(dev); 1993 struct amdgpu_device *adev = drm_to_adev(ddev); 1994 uint64_t count0 = 0, count1 = 0; 1995 int ret; 1996 1997 if (amdgpu_in_reset(adev)) 1998 return -EPERM; 1999 2000 if (adev->flags & AMD_IS_APU) 2001 return -ENODATA; 2002 2003 if (!adev->asic_funcs->get_pcie_usage) 2004 return -ENODATA; 2005 2006 ret = pm_runtime_get_sync(ddev->dev); 2007 if (ret < 0) { 2008 pm_runtime_put_autosuspend(ddev->dev); 2009 return ret; 2010 } 2011 2012 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 2013 2014 pm_runtime_mark_last_busy(ddev->dev); 2015 pm_runtime_put_autosuspend(ddev->dev); 2016 2017 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 2018 count0, count1, pcie_get_mps(adev->pdev)); 2019 } 2020 2021 /** 2022 * DOC: unique_id 2023 * 2024 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU 2025 * The file unique_id is used for this. 2026 * This will provide a Unique ID that will persist from machine to machine 2027 * 2028 * NOTE: This will only work for GFX9 and newer. This file will be absent 2029 * on unsupported ASICs (GFX8 and older) 2030 */ 2031 static ssize_t amdgpu_get_unique_id(struct device *dev, 2032 struct device_attribute *attr, 2033 char *buf) 2034 { 2035 struct drm_device *ddev = dev_get_drvdata(dev); 2036 struct amdgpu_device *adev = drm_to_adev(ddev); 2037 2038 if (amdgpu_in_reset(adev)) 2039 return -EPERM; 2040 2041 if (adev->unique_id) 2042 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); 2043 2044 return 0; 2045 } 2046 2047 /** 2048 * DOC: thermal_throttling_logging 2049 * 2050 * Thermal throttling pulls down the clock frequency and thus the performance. 2051 * It's an useful mechanism to protect the chip from overheating. Since it 2052 * impacts performance, the user controls whether it is enabled and if so, 2053 * the log frequency. 2054 * 2055 * Reading back the file shows you the status(enabled or disabled) and 2056 * the interval(in seconds) between each thermal logging. 2057 * 2058 * Writing an integer to the file, sets a new logging interval, in seconds. 2059 * The value should be between 1 and 3600. If the value is less than 1, 2060 * thermal logging is disabled. Values greater than 3600 are ignored. 2061 */ 2062 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev, 2063 struct device_attribute *attr, 2064 char *buf) 2065 { 2066 struct drm_device *ddev = dev_get_drvdata(dev); 2067 struct amdgpu_device *adev = drm_to_adev(ddev); 2068 2069 return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n", 2070 adev_to_drm(adev)->unique, 2071 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled", 2072 adev->throttling_logging_rs.interval / HZ + 1); 2073 } 2074 2075 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, 2076 struct device_attribute *attr, 2077 const char *buf, 2078 size_t count) 2079 { 2080 struct drm_device *ddev = dev_get_drvdata(dev); 2081 struct amdgpu_device *adev = drm_to_adev(ddev); 2082 long throttling_logging_interval; 2083 unsigned long flags; 2084 int ret = 0; 2085 2086 ret = kstrtol(buf, 0, &throttling_logging_interval); 2087 if (ret) 2088 return ret; 2089 2090 if (throttling_logging_interval > 3600) 2091 return -EINVAL; 2092 2093 if (throttling_logging_interval > 0) { 2094 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags); 2095 /* 2096 * Reset the ratelimit timer internals. 2097 * This can effectively restart the timer. 2098 */ 2099 adev->throttling_logging_rs.interval = 2100 (throttling_logging_interval - 1) * HZ; 2101 adev->throttling_logging_rs.begin = 0; 2102 adev->throttling_logging_rs.printed = 0; 2103 adev->throttling_logging_rs.missed = 0; 2104 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags); 2105 2106 atomic_set(&adev->throttling_logging_enabled, 1); 2107 } else { 2108 atomic_set(&adev->throttling_logging_enabled, 0); 2109 } 2110 2111 return count; 2112 } 2113 2114 /** 2115 * DOC: gpu_metrics 2116 * 2117 * The amdgpu driver provides a sysfs API for retrieving current gpu 2118 * metrics data. The file gpu_metrics is used for this. Reading the 2119 * file will dump all the current gpu metrics data. 2120 * 2121 * These data include temperature, frequency, engines utilization, 2122 * power consume, throttler status, fan speed and cpu core statistics( 2123 * available for APU only). That's it will give a snapshot of all sensors 2124 * at the same time. 2125 */ 2126 static ssize_t amdgpu_get_gpu_metrics(struct device *dev, 2127 struct device_attribute *attr, 2128 char *buf) 2129 { 2130 struct drm_device *ddev = dev_get_drvdata(dev); 2131 struct amdgpu_device *adev = drm_to_adev(ddev); 2132 void *gpu_metrics; 2133 ssize_t size = 0; 2134 int ret; 2135 2136 if (amdgpu_in_reset(adev)) 2137 return -EPERM; 2138 2139 ret = pm_runtime_get_sync(ddev->dev); 2140 if (ret < 0) { 2141 pm_runtime_put_autosuspend(ddev->dev); 2142 return ret; 2143 } 2144 2145 if (is_support_sw_smu(adev)) 2146 size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics); 2147 else if (adev->powerplay.pp_funcs->get_gpu_metrics) 2148 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); 2149 2150 if (size <= 0) 2151 goto out; 2152 2153 if (size >= PAGE_SIZE) 2154 size = PAGE_SIZE - 1; 2155 2156 memcpy(buf, gpu_metrics, size); 2157 2158 out: 2159 pm_runtime_mark_last_busy(ddev->dev); 2160 pm_runtime_put_autosuspend(ddev->dev); 2161 2162 return size; 2163 } 2164 2165 static struct amdgpu_device_attr amdgpu_device_attrs[] = { 2166 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2167 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC), 2168 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC), 2169 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC), 2170 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC), 2171 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC), 2172 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2173 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2174 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2175 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2176 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2177 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2178 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC), 2179 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC), 2180 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), 2181 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), 2182 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC), 2183 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), 2184 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC), 2185 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC), 2186 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), 2187 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC), 2188 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC), 2189 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC), 2190 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC), 2191 }; 2192 2193 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 2194 uint32_t mask, enum amdgpu_device_attr_states *states) 2195 { 2196 struct device_attribute *dev_attr = &attr->dev_attr; 2197 const char *attr_name = dev_attr->attr.name; 2198 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2199 enum amd_asic_type asic_type = adev->asic_type; 2200 2201 if (!(attr->flags & mask)) { 2202 *states = ATTR_STATE_UNSUPPORTED; 2203 return 0; 2204 } 2205 2206 #define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name)) 2207 2208 if (DEVICE_ATTR_IS(pp_dpm_socclk)) { 2209 if (asic_type < CHIP_VEGA10) 2210 *states = ATTR_STATE_UNSUPPORTED; 2211 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { 2212 if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS) 2213 *states = ATTR_STATE_UNSUPPORTED; 2214 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { 2215 if (asic_type < CHIP_VEGA20) 2216 *states = ATTR_STATE_UNSUPPORTED; 2217 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) { 2218 *states = ATTR_STATE_UNSUPPORTED; 2219 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 2220 (is_support_sw_smu(adev) && adev->smu.fine_grain_enabled) || 2221 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 2222 *states = ATTR_STATE_SUPPORTED; 2223 } else if (DEVICE_ATTR_IS(mem_busy_percent)) { 2224 if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10) 2225 *states = ATTR_STATE_UNSUPPORTED; 2226 } else if (DEVICE_ATTR_IS(pcie_bw)) { 2227 /* PCIe Perf counters won't work on APU nodes */ 2228 if (adev->flags & AMD_IS_APU) 2229 *states = ATTR_STATE_UNSUPPORTED; 2230 } else if (DEVICE_ATTR_IS(unique_id)) { 2231 if (asic_type != CHIP_VEGA10 && 2232 asic_type != CHIP_VEGA20 && 2233 asic_type != CHIP_ARCTURUS) 2234 *states = ATTR_STATE_UNSUPPORTED; 2235 } else if (DEVICE_ATTR_IS(pp_features)) { 2236 if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10) 2237 *states = ATTR_STATE_UNSUPPORTED; 2238 } else if (DEVICE_ATTR_IS(gpu_metrics)) { 2239 if (asic_type < CHIP_VEGA12) 2240 *states = ATTR_STATE_UNSUPPORTED; 2241 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { 2242 if (!(asic_type == CHIP_VANGOGH)) 2243 *states = ATTR_STATE_UNSUPPORTED; 2244 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { 2245 if (!(asic_type == CHIP_VANGOGH)) 2246 *states = ATTR_STATE_UNSUPPORTED; 2247 } 2248 2249 if (asic_type == CHIP_ARCTURUS) { 2250 /* Arcturus does not support standalone mclk/socclk/fclk level setting */ 2251 if (DEVICE_ATTR_IS(pp_dpm_mclk) || 2252 DEVICE_ATTR_IS(pp_dpm_socclk) || 2253 DEVICE_ATTR_IS(pp_dpm_fclk)) { 2254 dev_attr->attr.mode &= ~S_IWUGO; 2255 dev_attr->store = NULL; 2256 } 2257 } 2258 2259 #undef DEVICE_ATTR_IS 2260 2261 return 0; 2262 } 2263 2264 2265 static int amdgpu_device_attr_create(struct amdgpu_device *adev, 2266 struct amdgpu_device_attr *attr, 2267 uint32_t mask, struct list_head *attr_list) 2268 { 2269 int ret = 0; 2270 struct device_attribute *dev_attr = &attr->dev_attr; 2271 const char *name = dev_attr->attr.name; 2272 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; 2273 struct amdgpu_device_attr_entry *attr_entry; 2274 2275 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 2276 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; 2277 2278 BUG_ON(!attr); 2279 2280 attr_update = attr->attr_update ? attr_update : default_attr_update; 2281 2282 ret = attr_update(adev, attr, mask, &attr_states); 2283 if (ret) { 2284 dev_err(adev->dev, "failed to update device file %s, ret = %d\n", 2285 name, ret); 2286 return ret; 2287 } 2288 2289 if (attr_states == ATTR_STATE_UNSUPPORTED) 2290 return 0; 2291 2292 ret = device_create_file(adev->dev, dev_attr); 2293 if (ret) { 2294 dev_err(adev->dev, "failed to create device file %s, ret = %d\n", 2295 name, ret); 2296 } 2297 2298 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL); 2299 if (!attr_entry) 2300 return -ENOMEM; 2301 2302 attr_entry->attr = attr; 2303 INIT_LIST_HEAD(&attr_entry->entry); 2304 2305 list_add_tail(&attr_entry->entry, attr_list); 2306 2307 return ret; 2308 } 2309 2310 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr) 2311 { 2312 struct device_attribute *dev_attr = &attr->dev_attr; 2313 2314 device_remove_file(adev->dev, dev_attr); 2315 } 2316 2317 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 2318 struct list_head *attr_list); 2319 2320 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev, 2321 struct amdgpu_device_attr *attrs, 2322 uint32_t counts, 2323 uint32_t mask, 2324 struct list_head *attr_list) 2325 { 2326 int ret = 0; 2327 uint32_t i = 0; 2328 2329 for (i = 0; i < counts; i++) { 2330 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list); 2331 if (ret) 2332 goto failed; 2333 } 2334 2335 return 0; 2336 2337 failed: 2338 amdgpu_device_attr_remove_groups(adev, attr_list); 2339 2340 return ret; 2341 } 2342 2343 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 2344 struct list_head *attr_list) 2345 { 2346 struct amdgpu_device_attr_entry *entry, *entry_tmp; 2347 2348 if (list_empty(attr_list)) 2349 return ; 2350 2351 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) { 2352 amdgpu_device_attr_remove(adev, entry->attr); 2353 list_del(&entry->entry); 2354 kfree(entry); 2355 } 2356 } 2357 2358 static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 2359 struct device_attribute *attr, 2360 char *buf) 2361 { 2362 struct amdgpu_device *adev = dev_get_drvdata(dev); 2363 int channel = to_sensor_dev_attr(attr)->index; 2364 int r, temp = 0, size = sizeof(temp); 2365 2366 if (amdgpu_in_reset(adev)) 2367 return -EPERM; 2368 2369 if (channel >= PP_TEMP_MAX) 2370 return -EINVAL; 2371 2372 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2373 if (r < 0) { 2374 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2375 return r; 2376 } 2377 2378 switch (channel) { 2379 case PP_TEMP_JUNCTION: 2380 /* get current junction temperature */ 2381 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 2382 (void *)&temp, &size); 2383 break; 2384 case PP_TEMP_EDGE: 2385 /* get current edge temperature */ 2386 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 2387 (void *)&temp, &size); 2388 break; 2389 case PP_TEMP_MEM: 2390 /* get current memory temperature */ 2391 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 2392 (void *)&temp, &size); 2393 break; 2394 default: 2395 r = -EINVAL; 2396 break; 2397 } 2398 2399 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2400 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2401 2402 if (r) 2403 return r; 2404 2405 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2406 } 2407 2408 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 2409 struct device_attribute *attr, 2410 char *buf) 2411 { 2412 struct amdgpu_device *adev = dev_get_drvdata(dev); 2413 int hyst = to_sensor_dev_attr(attr)->index; 2414 int temp; 2415 2416 if (hyst) 2417 temp = adev->pm.dpm.thermal.min_temp; 2418 else 2419 temp = adev->pm.dpm.thermal.max_temp; 2420 2421 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2422 } 2423 2424 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 2425 struct device_attribute *attr, 2426 char *buf) 2427 { 2428 struct amdgpu_device *adev = dev_get_drvdata(dev); 2429 int hyst = to_sensor_dev_attr(attr)->index; 2430 int temp; 2431 2432 if (hyst) 2433 temp = adev->pm.dpm.thermal.min_hotspot_temp; 2434 else 2435 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 2436 2437 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2438 } 2439 2440 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 2441 struct device_attribute *attr, 2442 char *buf) 2443 { 2444 struct amdgpu_device *adev = dev_get_drvdata(dev); 2445 int hyst = to_sensor_dev_attr(attr)->index; 2446 int temp; 2447 2448 if (hyst) 2449 temp = adev->pm.dpm.thermal.min_mem_temp; 2450 else 2451 temp = adev->pm.dpm.thermal.max_mem_crit_temp; 2452 2453 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2454 } 2455 2456 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 2457 struct device_attribute *attr, 2458 char *buf) 2459 { 2460 int channel = to_sensor_dev_attr(attr)->index; 2461 2462 if (channel >= PP_TEMP_MAX) 2463 return -EINVAL; 2464 2465 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); 2466 } 2467 2468 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 2469 struct device_attribute *attr, 2470 char *buf) 2471 { 2472 struct amdgpu_device *adev = dev_get_drvdata(dev); 2473 int channel = to_sensor_dev_attr(attr)->index; 2474 int temp = 0; 2475 2476 if (channel >= PP_TEMP_MAX) 2477 return -EINVAL; 2478 2479 switch (channel) { 2480 case PP_TEMP_JUNCTION: 2481 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 2482 break; 2483 case PP_TEMP_EDGE: 2484 temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 2485 break; 2486 case PP_TEMP_MEM: 2487 temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 2488 break; 2489 } 2490 2491 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2492 } 2493 2494 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 2495 struct device_attribute *attr, 2496 char *buf) 2497 { 2498 struct amdgpu_device *adev = dev_get_drvdata(dev); 2499 u32 pwm_mode = 0; 2500 int ret; 2501 2502 if (amdgpu_in_reset(adev)) 2503 return -EPERM; 2504 2505 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2506 if (ret < 0) { 2507 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2508 return ret; 2509 } 2510 2511 if (is_support_sw_smu(adev)) { 2512 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2513 } else { 2514 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 2515 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2516 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2517 return -EINVAL; 2518 } 2519 2520 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2521 } 2522 2523 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2524 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2525 2526 return sprintf(buf, "%i\n", pwm_mode); 2527 } 2528 2529 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 2530 struct device_attribute *attr, 2531 const char *buf, 2532 size_t count) 2533 { 2534 struct amdgpu_device *adev = dev_get_drvdata(dev); 2535 int err, ret; 2536 int value; 2537 2538 if (amdgpu_in_reset(adev)) 2539 return -EPERM; 2540 2541 err = kstrtoint(buf, 10, &value); 2542 if (err) 2543 return err; 2544 2545 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2546 if (ret < 0) { 2547 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2548 return ret; 2549 } 2550 2551 if (is_support_sw_smu(adev)) { 2552 smu_set_fan_control_mode(&adev->smu, value); 2553 } else { 2554 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2555 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2556 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2557 return -EINVAL; 2558 } 2559 2560 amdgpu_dpm_set_fan_control_mode(adev, value); 2561 } 2562 2563 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2564 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2565 2566 return count; 2567 } 2568 2569 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 2570 struct device_attribute *attr, 2571 char *buf) 2572 { 2573 return sprintf(buf, "%i\n", 0); 2574 } 2575 2576 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 2577 struct device_attribute *attr, 2578 char *buf) 2579 { 2580 return sprintf(buf, "%i\n", 255); 2581 } 2582 2583 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 2584 struct device_attribute *attr, 2585 const char *buf, size_t count) 2586 { 2587 struct amdgpu_device *adev = dev_get_drvdata(dev); 2588 int err; 2589 u32 value; 2590 u32 pwm_mode; 2591 2592 if (amdgpu_in_reset(adev)) 2593 return -EPERM; 2594 2595 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2596 if (err < 0) { 2597 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2598 return err; 2599 } 2600 2601 if (is_support_sw_smu(adev)) 2602 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2603 else 2604 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2605 2606 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2607 pr_info("manual fan speed control should be enabled first\n"); 2608 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2609 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2610 return -EINVAL; 2611 } 2612 2613 err = kstrtou32(buf, 10, &value); 2614 if (err) { 2615 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2616 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2617 return err; 2618 } 2619 2620 value = (value * 100) / 255; 2621 2622 if (is_support_sw_smu(adev)) 2623 err = smu_set_fan_speed_percent(&adev->smu, value); 2624 else if (adev->powerplay.pp_funcs->set_fan_speed_percent) 2625 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 2626 else 2627 err = -EINVAL; 2628 2629 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2630 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2631 2632 if (err) 2633 return err; 2634 2635 return count; 2636 } 2637 2638 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 2639 struct device_attribute *attr, 2640 char *buf) 2641 { 2642 struct amdgpu_device *adev = dev_get_drvdata(dev); 2643 int err; 2644 u32 speed = 0; 2645 2646 if (amdgpu_in_reset(adev)) 2647 return -EPERM; 2648 2649 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2650 if (err < 0) { 2651 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2652 return err; 2653 } 2654 2655 if (is_support_sw_smu(adev)) 2656 err = smu_get_fan_speed_percent(&adev->smu, &speed); 2657 else if (adev->powerplay.pp_funcs->get_fan_speed_percent) 2658 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 2659 else 2660 err = -EINVAL; 2661 2662 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2663 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2664 2665 if (err) 2666 return err; 2667 2668 speed = (speed * 255) / 100; 2669 2670 return sprintf(buf, "%i\n", speed); 2671 } 2672 2673 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 2674 struct device_attribute *attr, 2675 char *buf) 2676 { 2677 struct amdgpu_device *adev = dev_get_drvdata(dev); 2678 int err; 2679 u32 speed = 0; 2680 2681 if (amdgpu_in_reset(adev)) 2682 return -EPERM; 2683 2684 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2685 if (err < 0) { 2686 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2687 return err; 2688 } 2689 2690 if (is_support_sw_smu(adev)) 2691 err = smu_get_fan_speed_rpm(&adev->smu, &speed); 2692 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2693 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 2694 else 2695 err = -EINVAL; 2696 2697 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2698 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2699 2700 if (err) 2701 return err; 2702 2703 return sprintf(buf, "%i\n", speed); 2704 } 2705 2706 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 2707 struct device_attribute *attr, 2708 char *buf) 2709 { 2710 struct amdgpu_device *adev = dev_get_drvdata(dev); 2711 u32 min_rpm = 0; 2712 u32 size = sizeof(min_rpm); 2713 int r; 2714 2715 if (amdgpu_in_reset(adev)) 2716 return -EPERM; 2717 2718 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2719 if (r < 0) { 2720 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2721 return r; 2722 } 2723 2724 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2725 (void *)&min_rpm, &size); 2726 2727 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2728 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2729 2730 if (r) 2731 return r; 2732 2733 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); 2734 } 2735 2736 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 2737 struct device_attribute *attr, 2738 char *buf) 2739 { 2740 struct amdgpu_device *adev = dev_get_drvdata(dev); 2741 u32 max_rpm = 0; 2742 u32 size = sizeof(max_rpm); 2743 int r; 2744 2745 if (amdgpu_in_reset(adev)) 2746 return -EPERM; 2747 2748 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2749 if (r < 0) { 2750 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2751 return r; 2752 } 2753 2754 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2755 (void *)&max_rpm, &size); 2756 2757 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2758 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2759 2760 if (r) 2761 return r; 2762 2763 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); 2764 } 2765 2766 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 2767 struct device_attribute *attr, 2768 char *buf) 2769 { 2770 struct amdgpu_device *adev = dev_get_drvdata(dev); 2771 int err; 2772 u32 rpm = 0; 2773 2774 if (amdgpu_in_reset(adev)) 2775 return -EPERM; 2776 2777 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2778 if (err < 0) { 2779 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2780 return err; 2781 } 2782 2783 if (is_support_sw_smu(adev)) 2784 err = smu_get_fan_speed_rpm(&adev->smu, &rpm); 2785 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2786 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 2787 else 2788 err = -EINVAL; 2789 2790 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2791 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2792 2793 if (err) 2794 return err; 2795 2796 return sprintf(buf, "%i\n", rpm); 2797 } 2798 2799 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, 2800 struct device_attribute *attr, 2801 const char *buf, size_t count) 2802 { 2803 struct amdgpu_device *adev = dev_get_drvdata(dev); 2804 int err; 2805 u32 value; 2806 u32 pwm_mode; 2807 2808 if (amdgpu_in_reset(adev)) 2809 return -EPERM; 2810 2811 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2812 if (err < 0) { 2813 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2814 return err; 2815 } 2816 2817 if (is_support_sw_smu(adev)) 2818 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2819 else 2820 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2821 2822 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2823 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2824 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2825 return -ENODATA; 2826 } 2827 2828 err = kstrtou32(buf, 10, &value); 2829 if (err) { 2830 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2831 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2832 return err; 2833 } 2834 2835 if (is_support_sw_smu(adev)) 2836 err = smu_set_fan_speed_rpm(&adev->smu, value); 2837 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) 2838 err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 2839 else 2840 err = -EINVAL; 2841 2842 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2843 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2844 2845 if (err) 2846 return err; 2847 2848 return count; 2849 } 2850 2851 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 2852 struct device_attribute *attr, 2853 char *buf) 2854 { 2855 struct amdgpu_device *adev = dev_get_drvdata(dev); 2856 u32 pwm_mode = 0; 2857 int ret; 2858 2859 if (amdgpu_in_reset(adev)) 2860 return -EPERM; 2861 2862 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2863 if (ret < 0) { 2864 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2865 return ret; 2866 } 2867 2868 if (is_support_sw_smu(adev)) { 2869 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2870 } else { 2871 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 2872 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2873 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2874 return -EINVAL; 2875 } 2876 2877 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2878 } 2879 2880 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2881 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2882 2883 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); 2884 } 2885 2886 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 2887 struct device_attribute *attr, 2888 const char *buf, 2889 size_t count) 2890 { 2891 struct amdgpu_device *adev = dev_get_drvdata(dev); 2892 int err; 2893 int value; 2894 u32 pwm_mode; 2895 2896 if (amdgpu_in_reset(adev)) 2897 return -EPERM; 2898 2899 err = kstrtoint(buf, 10, &value); 2900 if (err) 2901 return err; 2902 2903 if (value == 0) 2904 pwm_mode = AMD_FAN_CTRL_AUTO; 2905 else if (value == 1) 2906 pwm_mode = AMD_FAN_CTRL_MANUAL; 2907 else 2908 return -EINVAL; 2909 2910 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2911 if (err < 0) { 2912 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2913 return err; 2914 } 2915 2916 if (is_support_sw_smu(adev)) { 2917 smu_set_fan_control_mode(&adev->smu, pwm_mode); 2918 } else { 2919 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2920 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2921 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2922 return -EINVAL; 2923 } 2924 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2925 } 2926 2927 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2928 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2929 2930 return count; 2931 } 2932 2933 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 2934 struct device_attribute *attr, 2935 char *buf) 2936 { 2937 struct amdgpu_device *adev = dev_get_drvdata(dev); 2938 u32 vddgfx; 2939 int r, size = sizeof(vddgfx); 2940 2941 if (amdgpu_in_reset(adev)) 2942 return -EPERM; 2943 2944 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2945 if (r < 0) { 2946 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2947 return r; 2948 } 2949 2950 /* get the voltage */ 2951 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, 2952 (void *)&vddgfx, &size); 2953 2954 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2955 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2956 2957 if (r) 2958 return r; 2959 2960 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); 2961 } 2962 2963 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 2964 struct device_attribute *attr, 2965 char *buf) 2966 { 2967 return snprintf(buf, PAGE_SIZE, "vddgfx\n"); 2968 } 2969 2970 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 2971 struct device_attribute *attr, 2972 char *buf) 2973 { 2974 struct amdgpu_device *adev = dev_get_drvdata(dev); 2975 u32 vddnb; 2976 int r, size = sizeof(vddnb); 2977 2978 if (amdgpu_in_reset(adev)) 2979 return -EPERM; 2980 2981 /* only APUs have vddnb */ 2982 if (!(adev->flags & AMD_IS_APU)) 2983 return -EINVAL; 2984 2985 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2986 if (r < 0) { 2987 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2988 return r; 2989 } 2990 2991 /* get the voltage */ 2992 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, 2993 (void *)&vddnb, &size); 2994 2995 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2996 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2997 2998 if (r) 2999 return r; 3000 3001 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); 3002 } 3003 3004 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 3005 struct device_attribute *attr, 3006 char *buf) 3007 { 3008 return snprintf(buf, PAGE_SIZE, "vddnb\n"); 3009 } 3010 3011 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 3012 struct device_attribute *attr, 3013 char *buf) 3014 { 3015 struct amdgpu_device *adev = dev_get_drvdata(dev); 3016 u32 query = 0; 3017 int r, size = sizeof(u32); 3018 unsigned uw; 3019 3020 if (amdgpu_in_reset(adev)) 3021 return -EPERM; 3022 3023 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 3024 if (r < 0) { 3025 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3026 return r; 3027 } 3028 3029 /* get the voltage */ 3030 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, 3031 (void *)&query, &size); 3032 3033 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 3034 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3035 3036 if (r) 3037 return r; 3038 3039 /* convert to microwatts */ 3040 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 3041 3042 return snprintf(buf, PAGE_SIZE, "%u\n", uw); 3043 } 3044 3045 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 3046 struct device_attribute *attr, 3047 char *buf) 3048 { 3049 return sprintf(buf, "%i\n", 0); 3050 } 3051 3052 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 3053 struct device_attribute *attr, 3054 char *buf) 3055 { 3056 struct amdgpu_device *adev = dev_get_drvdata(dev); 3057 uint32_t limit = 0; 3058 ssize_t size; 3059 int r; 3060 3061 if (amdgpu_in_reset(adev)) 3062 return -EPERM; 3063 3064 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 3065 if (r < 0) { 3066 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3067 return r; 3068 } 3069 3070 if (is_support_sw_smu(adev)) { 3071 smu_get_power_limit(&adev->smu, &limit, true); 3072 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 3073 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 3074 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); 3075 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 3076 } else { 3077 size = snprintf(buf, PAGE_SIZE, "\n"); 3078 } 3079 3080 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 3081 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3082 3083 return size; 3084 } 3085 3086 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 3087 struct device_attribute *attr, 3088 char *buf) 3089 { 3090 struct amdgpu_device *adev = dev_get_drvdata(dev); 3091 uint32_t limit = 0; 3092 ssize_t size; 3093 int r; 3094 3095 if (amdgpu_in_reset(adev)) 3096 return -EPERM; 3097 3098 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 3099 if (r < 0) { 3100 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3101 return r; 3102 } 3103 3104 if (is_support_sw_smu(adev)) { 3105 smu_get_power_limit(&adev->smu, &limit, false); 3106 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 3107 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 3108 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); 3109 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 3110 } else { 3111 size = snprintf(buf, PAGE_SIZE, "\n"); 3112 } 3113 3114 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 3115 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3116 3117 return size; 3118 } 3119 3120 3121 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 3122 struct device_attribute *attr, 3123 const char *buf, 3124 size_t count) 3125 { 3126 struct amdgpu_device *adev = dev_get_drvdata(dev); 3127 int err; 3128 u32 value; 3129 3130 if (amdgpu_in_reset(adev)) 3131 return -EPERM; 3132 3133 if (amdgpu_sriov_vf(adev)) 3134 return -EINVAL; 3135 3136 err = kstrtou32(buf, 10, &value); 3137 if (err) 3138 return err; 3139 3140 value = value / 1000000; /* convert to Watt */ 3141 3142 3143 err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 3144 if (err < 0) { 3145 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3146 return err; 3147 } 3148 3149 if (is_support_sw_smu(adev)) 3150 err = smu_set_power_limit(&adev->smu, value); 3151 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) 3152 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); 3153 else 3154 err = -EINVAL; 3155 3156 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 3157 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3158 3159 if (err) 3160 return err; 3161 3162 return count; 3163 } 3164 3165 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 3166 struct device_attribute *attr, 3167 char *buf) 3168 { 3169 struct amdgpu_device *adev = dev_get_drvdata(dev); 3170 uint32_t sclk; 3171 int r, size = sizeof(sclk); 3172 3173 if (amdgpu_in_reset(adev)) 3174 return -EPERM; 3175 3176 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 3177 if (r < 0) { 3178 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3179 return r; 3180 } 3181 3182 /* get the sclk */ 3183 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 3184 (void *)&sclk, &size); 3185 3186 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 3187 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3188 3189 if (r) 3190 return r; 3191 3192 return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); 3193 } 3194 3195 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, 3196 struct device_attribute *attr, 3197 char *buf) 3198 { 3199 return snprintf(buf, PAGE_SIZE, "sclk\n"); 3200 } 3201 3202 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 3203 struct device_attribute *attr, 3204 char *buf) 3205 { 3206 struct amdgpu_device *adev = dev_get_drvdata(dev); 3207 uint32_t mclk; 3208 int r, size = sizeof(mclk); 3209 3210 if (amdgpu_in_reset(adev)) 3211 return -EPERM; 3212 3213 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 3214 if (r < 0) { 3215 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3216 return r; 3217 } 3218 3219 /* get the sclk */ 3220 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 3221 (void *)&mclk, &size); 3222 3223 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 3224 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 3225 3226 if (r) 3227 return r; 3228 3229 return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); 3230 } 3231 3232 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 3233 struct device_attribute *attr, 3234 char *buf) 3235 { 3236 return snprintf(buf, PAGE_SIZE, "mclk\n"); 3237 } 3238 3239 /** 3240 * DOC: hwmon 3241 * 3242 * The amdgpu driver exposes the following sensor interfaces: 3243 * 3244 * - GPU temperature (via the on-die sensor) 3245 * 3246 * - GPU voltage 3247 * 3248 * - Northbridge voltage (APUs only) 3249 * 3250 * - GPU power 3251 * 3252 * - GPU fan 3253 * 3254 * - GPU gfx/compute engine clock 3255 * 3256 * - GPU memory clock (dGPU only) 3257 * 3258 * hwmon interfaces for GPU temperature: 3259 * 3260 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius 3261 * - temp2_input and temp3_input are supported on SOC15 dGPUs only 3262 * 3263 * - temp[1-3]_label: temperature channel label 3264 * - temp2_label and temp3_label are supported on SOC15 dGPUs only 3265 * 3266 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 3267 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 3268 * 3269 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 3270 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only 3271 * 3272 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius 3273 * - these are supported on SOC15 dGPUs only 3274 * 3275 * hwmon interfaces for GPU voltage: 3276 * 3277 * - in0_input: the voltage on the GPU in millivolts 3278 * 3279 * - in1_input: the voltage on the Northbridge in millivolts 3280 * 3281 * hwmon interfaces for GPU power: 3282 * 3283 * - power1_average: average power used by the GPU in microWatts 3284 * 3285 * - power1_cap_min: minimum cap supported in microWatts 3286 * 3287 * - power1_cap_max: maximum cap supported in microWatts 3288 * 3289 * - power1_cap: selected power cap in microWatts 3290 * 3291 * hwmon interfaces for GPU fan: 3292 * 3293 * - pwm1: pulse width modulation fan level (0-255) 3294 * 3295 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 3296 * 3297 * - pwm1_min: pulse width modulation fan control minimum level (0) 3298 * 3299 * - pwm1_max: pulse width modulation fan control maximum level (255) 3300 * 3301 * - fan1_min: an minimum value Unit: revolution/min (RPM) 3302 * 3303 * - fan1_max: an maxmum value Unit: revolution/max (RPM) 3304 * 3305 * - fan1_input: fan speed in RPM 3306 * 3307 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) 3308 * 3309 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable 3310 * 3311 * hwmon interfaces for GPU clocks: 3312 * 3313 * - freq1_input: the gfx/compute clock in hertz 3314 * 3315 * - freq2_input: the memory clock in hertz 3316 * 3317 * You can use hwmon tools like sensors to view this information on your system. 3318 * 3319 */ 3320 3321 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 3322 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 3323 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 3324 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 3325 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 3326 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 3327 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 3328 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 3329 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 3330 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 3331 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 3332 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 3333 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 3334 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 3335 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 3336 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 3337 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 3338 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 3339 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 3340 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 3341 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 3342 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 3343 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 3344 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 3345 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 3346 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 3347 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 3348 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 3349 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 3350 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 3351 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 3352 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 3353 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 3354 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 3355 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 3356 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 3357 3358 static struct attribute *hwmon_attributes[] = { 3359 &sensor_dev_attr_temp1_input.dev_attr.attr, 3360 &sensor_dev_attr_temp1_crit.dev_attr.attr, 3361 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 3362 &sensor_dev_attr_temp2_input.dev_attr.attr, 3363 &sensor_dev_attr_temp2_crit.dev_attr.attr, 3364 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 3365 &sensor_dev_attr_temp3_input.dev_attr.attr, 3366 &sensor_dev_attr_temp3_crit.dev_attr.attr, 3367 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 3368 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 3369 &sensor_dev_attr_temp2_emergency.dev_attr.attr, 3370 &sensor_dev_attr_temp3_emergency.dev_attr.attr, 3371 &sensor_dev_attr_temp1_label.dev_attr.attr, 3372 &sensor_dev_attr_temp2_label.dev_attr.attr, 3373 &sensor_dev_attr_temp3_label.dev_attr.attr, 3374 &sensor_dev_attr_pwm1.dev_attr.attr, 3375 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 3376 &sensor_dev_attr_pwm1_min.dev_attr.attr, 3377 &sensor_dev_attr_pwm1_max.dev_attr.attr, 3378 &sensor_dev_attr_fan1_input.dev_attr.attr, 3379 &sensor_dev_attr_fan1_min.dev_attr.attr, 3380 &sensor_dev_attr_fan1_max.dev_attr.attr, 3381 &sensor_dev_attr_fan1_target.dev_attr.attr, 3382 &sensor_dev_attr_fan1_enable.dev_attr.attr, 3383 &sensor_dev_attr_in0_input.dev_attr.attr, 3384 &sensor_dev_attr_in0_label.dev_attr.attr, 3385 &sensor_dev_attr_in1_input.dev_attr.attr, 3386 &sensor_dev_attr_in1_label.dev_attr.attr, 3387 &sensor_dev_attr_power1_average.dev_attr.attr, 3388 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 3389 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 3390 &sensor_dev_attr_power1_cap.dev_attr.attr, 3391 &sensor_dev_attr_freq1_input.dev_attr.attr, 3392 &sensor_dev_attr_freq1_label.dev_attr.attr, 3393 &sensor_dev_attr_freq2_input.dev_attr.attr, 3394 &sensor_dev_attr_freq2_label.dev_attr.attr, 3395 NULL 3396 }; 3397 3398 static umode_t hwmon_attributes_visible(struct kobject *kobj, 3399 struct attribute *attr, int index) 3400 { 3401 struct device *dev = kobj_to_dev(kobj); 3402 struct amdgpu_device *adev = dev_get_drvdata(dev); 3403 umode_t effective_mode = attr->mode; 3404 3405 /* under multi-vf mode, the hwmon attributes are all not supported */ 3406 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 3407 return 0; 3408 3409 /* there is no fan under pp one vf mode */ 3410 if (amdgpu_sriov_is_pp_one_vf(adev) && 3411 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3412 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3413 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3414 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3415 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3416 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3417 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3418 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3419 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3420 return 0; 3421 3422 /* Skip fan attributes if fan is not present */ 3423 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3424 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3425 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3426 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3427 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3428 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3429 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3430 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3431 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3432 return 0; 3433 3434 /* Skip fan attributes on APU */ 3435 if ((adev->flags & AMD_IS_APU) && 3436 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3437 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3438 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3439 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3440 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3441 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3442 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3443 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3444 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3445 return 0; 3446 3447 /* Skip crit temp on APU */ 3448 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) && 3449 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3450 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 3451 return 0; 3452 3453 /* Skip limit attributes if DPM is not enabled */ 3454 if (!adev->pm.dpm_enabled && 3455 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3456 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 3457 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3458 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3459 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3460 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3461 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3462 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3463 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3464 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3465 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3466 return 0; 3467 3468 if (!is_support_sw_smu(adev)) { 3469 /* mask fan attributes if we have no bindings for this asic to expose */ 3470 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 3471 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 3472 (!adev->powerplay.pp_funcs->get_fan_control_mode && 3473 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 3474 effective_mode &= ~S_IRUGO; 3475 3476 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 3477 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 3478 (!adev->powerplay.pp_funcs->set_fan_control_mode && 3479 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 3480 effective_mode &= ~S_IWUSR; 3481 } 3482 3483 if (((adev->flags & AMD_IS_APU) || 3484 adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */ 3485 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 3486 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 3487 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 3488 return 0; 3489 3490 if (((adev->family == AMDGPU_FAMILY_SI) || 3491 ((adev->flags & AMD_IS_APU) && 3492 (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */ 3493 (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) 3494 return 0; 3495 3496 if (!is_support_sw_smu(adev)) { 3497 /* hide max/min values if we can't both query and manage the fan */ 3498 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 3499 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 3500 (!adev->powerplay.pp_funcs->set_fan_speed_rpm && 3501 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 3502 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3503 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 3504 return 0; 3505 3506 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && 3507 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 3508 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3509 attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 3510 return 0; 3511 } 3512 3513 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 3514 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 3515 (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 3516 attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 3517 return 0; 3518 3519 /* only APUs have vddnb */ 3520 if (!(adev->flags & AMD_IS_APU) && 3521 (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 3522 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 3523 return 0; 3524 3525 /* no mclk on APUs */ 3526 if ((adev->flags & AMD_IS_APU) && 3527 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 3528 attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 3529 return 0; 3530 3531 /* only SOC15 dGPUs support hotspot and mem temperatures */ 3532 if (((adev->flags & AMD_IS_APU) || 3533 adev->asic_type < CHIP_VEGA10) && 3534 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 3535 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 3536 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || 3537 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 3538 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 3539 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 3540 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || 3541 attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 3542 attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 3543 attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 3544 attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) 3545 return 0; 3546 3547 return effective_mode; 3548 } 3549 3550 static const struct attribute_group hwmon_attrgroup = { 3551 .attrs = hwmon_attributes, 3552 .is_visible = hwmon_attributes_visible, 3553 }; 3554 3555 static const struct attribute_group *hwmon_groups[] = { 3556 &hwmon_attrgroup, 3557 NULL 3558 }; 3559 3560 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 3561 { 3562 int ret; 3563 uint32_t mask = 0; 3564 3565 if (adev->pm.sysfs_initialized) 3566 return 0; 3567 3568 if (adev->pm.dpm_enabled == 0) 3569 return 0; 3570 3571 INIT_LIST_HEAD(&adev->pm.pm_attr_list); 3572 3573 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 3574 DRIVER_NAME, adev, 3575 hwmon_groups); 3576 if (IS_ERR(adev->pm.int_hwmon_dev)) { 3577 ret = PTR_ERR(adev->pm.int_hwmon_dev); 3578 dev_err(adev->dev, 3579 "Unable to register hwmon device: %d\n", ret); 3580 return ret; 3581 } 3582 3583 switch (amdgpu_virt_get_sriov_vf_mode(adev)) { 3584 case SRIOV_VF_MODE_ONE_VF: 3585 mask = ATTR_FLAG_ONEVF; 3586 break; 3587 case SRIOV_VF_MODE_MULTI_VF: 3588 mask = 0; 3589 break; 3590 case SRIOV_VF_MODE_BARE_METAL: 3591 default: 3592 mask = ATTR_FLAG_MASK_ALL; 3593 break; 3594 } 3595 3596 ret = amdgpu_device_attr_create_groups(adev, 3597 amdgpu_device_attrs, 3598 ARRAY_SIZE(amdgpu_device_attrs), 3599 mask, 3600 &adev->pm.pm_attr_list); 3601 if (ret) 3602 return ret; 3603 3604 adev->pm.sysfs_initialized = true; 3605 3606 return 0; 3607 } 3608 3609 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 3610 { 3611 if (adev->pm.dpm_enabled == 0) 3612 return; 3613 3614 if (adev->pm.int_hwmon_dev) 3615 hwmon_device_unregister(adev->pm.int_hwmon_dev); 3616 3617 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list); 3618 } 3619 3620 /* 3621 * Debugfs info 3622 */ 3623 #if defined(CONFIG_DEBUG_FS) 3624 3625 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 3626 { 3627 uint32_t value; 3628 uint64_t value64 = 0; 3629 uint32_t query = 0; 3630 int size; 3631 3632 /* GPU Clocks */ 3633 size = sizeof(value); 3634 seq_printf(m, "GFX Clocks and Power:\n"); 3635 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 3636 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 3637 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 3638 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 3639 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 3640 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 3641 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 3642 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 3643 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 3644 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 3645 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 3646 seq_printf(m, "\t%u mV (VDDNB)\n", value); 3647 size = sizeof(uint32_t); 3648 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) 3649 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); 3650 size = sizeof(value); 3651 seq_printf(m, "\n"); 3652 3653 /* GPU Temp */ 3654 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 3655 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 3656 3657 /* GPU Load */ 3658 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 3659 seq_printf(m, "GPU Load: %u %%\n", value); 3660 /* MEM Load */ 3661 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) 3662 seq_printf(m, "MEM Load: %u %%\n", value); 3663 3664 seq_printf(m, "\n"); 3665 3666 /* SMC feature mask */ 3667 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 3668 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 3669 3670 if (adev->asic_type > CHIP_VEGA20) { 3671 /* VCN clocks */ 3672 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { 3673 if (!value) { 3674 seq_printf(m, "VCN: Disabled\n"); 3675 } else { 3676 seq_printf(m, "VCN: Enabled\n"); 3677 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3678 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3679 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3680 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3681 } 3682 } 3683 seq_printf(m, "\n"); 3684 } else { 3685 /* UVD clocks */ 3686 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 3687 if (!value) { 3688 seq_printf(m, "UVD: Disabled\n"); 3689 } else { 3690 seq_printf(m, "UVD: Enabled\n"); 3691 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3692 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3693 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3694 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3695 } 3696 } 3697 seq_printf(m, "\n"); 3698 3699 /* VCE clocks */ 3700 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 3701 if (!value) { 3702 seq_printf(m, "VCE: Disabled\n"); 3703 } else { 3704 seq_printf(m, "VCE: Enabled\n"); 3705 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 3706 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 3707 } 3708 } 3709 } 3710 3711 return 0; 3712 } 3713 3714 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 3715 { 3716 int i; 3717 3718 for (i = 0; clocks[i].flag; i++) 3719 seq_printf(m, "\t%s: %s\n", clocks[i].name, 3720 (flags & clocks[i].flag) ? "On" : "Off"); 3721 } 3722 3723 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 3724 { 3725 struct drm_info_node *node = (struct drm_info_node *) m->private; 3726 struct drm_device *dev = node->minor->dev; 3727 struct amdgpu_device *adev = drm_to_adev(dev); 3728 u32 flags = 0; 3729 int r; 3730 3731 if (amdgpu_in_reset(adev)) 3732 return -EPERM; 3733 3734 r = pm_runtime_get_sync(dev->dev); 3735 if (r < 0) { 3736 pm_runtime_put_autosuspend(dev->dev); 3737 return r; 3738 } 3739 3740 if (!adev->pm.dpm_enabled) { 3741 seq_printf(m, "dpm not enabled\n"); 3742 pm_runtime_mark_last_busy(dev->dev); 3743 pm_runtime_put_autosuspend(dev->dev); 3744 return 0; 3745 } 3746 3747 if (!is_support_sw_smu(adev) && 3748 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3749 mutex_lock(&adev->pm.mutex); 3750 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 3751 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 3752 else 3753 seq_printf(m, "Debugfs support not implemented for this asic\n"); 3754 mutex_unlock(&adev->pm.mutex); 3755 r = 0; 3756 } else { 3757 r = amdgpu_debugfs_pm_info_pp(m, adev); 3758 } 3759 if (r) 3760 goto out; 3761 3762 amdgpu_device_ip_get_clockgating_state(adev, &flags); 3763 3764 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 3765 amdgpu_parse_cg_state(m, flags); 3766 seq_printf(m, "\n"); 3767 3768 out: 3769 pm_runtime_mark_last_busy(dev->dev); 3770 pm_runtime_put_autosuspend(dev->dev); 3771 3772 return r; 3773 } 3774 3775 static const struct drm_info_list amdgpu_pm_info_list[] = { 3776 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 3777 }; 3778 #endif 3779 3780 int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 3781 { 3782 #if defined(CONFIG_DEBUG_FS) 3783 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 3784 #else 3785 return 0; 3786 #endif 3787 } 3788