/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
#include "hwmgr.h"

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific work loads where you do
 * not want clock or power gating, or clock fluctuation, to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_states_info data;
	int i, buf_len, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_current_power_state
		 && pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current power play table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
	if (ret) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The file pp_od_clk_voltage is
 * used for this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level.
 * Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
 *   Cavefish. For these ASICs, the target voltage calculation can be
 *   illustrated by "voltage = voltage calculated from v/f curve +
 *   overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz and
 *   "m 1 800" will update the maximum mclk to 800 MHz.
 *
 *   For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update point 1 with the clock set to 300 MHz and the voltage to
 *   600 mV. "vc 2 1000 1000" will update point 3 with the clock set
 *   to 1000 MHz and the voltage to 1000 mV.
 *
 *   To update the voltage offset applied for gfxclk/voltage calculation,
 *   enter the new value by writing a string that contains "vo offset".
 *   This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
 *   The offset can be a positive or negative value.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
		ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
							parameter,
							parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
		ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
						    parameter, parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev,
						 AMD_PP_TASK_READJUST_POWER_STATE,
						 NULL);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return count;
		} else {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - the current ppfeature mask
 * - a list of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and input the
 * new ppfeature mask.
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Then enable the desired levels by writing a string containing a
 * space-separated list of level indices to the pp_dpm_sclk/mclk/pcie file.
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported.
 */

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, type, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = 0;
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = 0;
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = 0;
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = 0;
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
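 *
 * As a quick illustration (the exact path is hypothetical, assuming the
 * device is card0; the read/write semantics are detailed below):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo 1 > /sys/class/drm/card0/device/pp_power_profile_mode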
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family.
 *
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
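 *
 * For example, to poll the load once per second (the path is illustrative,
 * assuming the device is card0):
 *
 * .. code-block:: bash
 *
 *	watch -n 1 cat /sys/class/drm/card0/device/gpu_busy_percent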
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
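 *
 * For example, a rough upper bound on the bandwidth used over the last
 * second can be derived from a read of the file (the values below are
 * hypothetical, assuming the device is card0):
 *
 * .. code-block:: bash
 *
 *	$ cat /sys/class/drm/card0/device/pcie_bw
 *	4096 2048 256
 *	# (received + sent) * mps = (4096 + 2048) * 256 bytes/s at most
 *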
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
 */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * It provides a unique ID that persists from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}

/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
 */
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
			adev_to_drm(adev)->unique,
			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * The data includes temperature, frequency, engine utilization, power
 * consumption, throttler status, fan speed and CPU core statistics
 * (available for APUs only). That is, it gives a snapshot of all sensors
 * at the same time.
 */
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_gpu_metrics)
		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);

	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
};

static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (asic_type < CHIP_VEGA12)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		if (!(asic_type == CHIP_VANGOGH))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(asic_type == CHIP_VANGOGH))
			*states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}


static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	attr_update = attr->attr_update ?
		      attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
}

static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -EINVAL;
	}

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return sprintf(buf, "%u\n", pwm_mode);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
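	/*
	 * Note: pm_runtime_get_sync() raises the device usage count even on
	 * failure, which is why the error path below must still drop the
	 * reference with pm_runtime_put_autosuspend().
	 */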
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -EINVAL;
	}

	amdgpu_dpm_set_fan_control_mode(adev, value);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -EINVAL;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	/* scale the sysfs PWM range (0-255) to a percentage */
	value = (value * 100) / 255;

	if (adev->powerplay.pp_funcs->set_fan_speed_percent)
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (adev->powerplay.pp_funcs->get_fan_speed_percent)
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	/* scale the percentage back to the sysfs PWM range (0-255) */
	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	u32 size = sizeof(min_rpm);
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
				   (void *)&min_rpm, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	u32 size = sizeof(max_rpm);
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
				   (void *)&max_rpm, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", rpm);
}

static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -ENODATA;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -EINVAL;
	}

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;
	u32 pwm_mode;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	if (value == 0)
		pwm_mode = AMD_FAN_CTRL_AUTO;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;
	else
		return -EINVAL;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return -EINVAL;
	}

	amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return count;
}

static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}

static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddnb;
	int r, size = sizeof(vddnb);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddnb\n");
}

static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 query = 0;
	int r, size = sizeof(u32);
	unsigned uw;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the average power, reported as 8.8 fixed-point watts */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}

static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int limit_type = to_sensor_dev_attr(attr)->index;
	uint32_t limit = limit_type << 24;
	ssize_t size;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (pp_funcs && pp_funcs->get_power_limit) {
		pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return size;
}

static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int limit_type = to_sensor_dev_attr(attr)->index;
	uint32_t limit = limit_type << 24;
	ssize_t size;
	int r;
	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (pp_funcs && pp_funcs->get_power_limit) {
		pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return size;
}

static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	int limit_type = to_sensor_dev_attr(attr)->index;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
}

static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int limit_type = to_sensor_dev_attr(attr)->index;
	int err;
	u32 value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = value / 1000000; /* convert to Watt */
	value |= limit_type << 24;

	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (err < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return err;
	}

	if (pp_funcs && pp_funcs->set_power_limit)
		err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t sclk;
	int r, size = sizeof(sclk);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	/* the sensor reports 10 kHz units; hwmon expects Hz */
	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "sclk\n");
}

static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t mclk;
	int r, size = sizeof(mclk);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* get the mclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "mclk\n");
}

/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
 *   - these are supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum supported fan speed, in revolutions per minute (RPM)
 *
 * - fan1_max: maximum supported fan speed, in revolutions per minute (RPM)
 *
 * - fan1_input: current fan speed in RPM
 *
 * - fan[1-\*]_target: desired fan speed, in RPM
 *
 * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
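 *
 * For example (an illustrative sketch; the hwmon directory index is
 * assigned at probe time, so "hwmon0" below may differ on your system):
 *
 * - "cat /sys/class/hwmon/hwmon0/temp1_input" prints the edge temperature
 *   in millidegrees Celsius
 *
 * - "echo 1 > /sys/class/hwmon/hwmon0/pwm1_enable" selects manual fan
 *   control, after which "echo 128 > /sys/class/hwmon/hwmon0/pwm1"
 *   requests roughly 50% fan duty cycle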
 *
 */

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
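/*
 * The power2_* channel (sensor index 1) mirrors power1_* for the second,
 * "fast" PPT limit; hwmon_attributes_visible() below only exposes it on
 * ASICs that actually have one.
 */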
static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_power1_label.dev_attr.attr,
	&sensor_dev_attr_power2_average.dev_attr.attr,
	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
	&sensor_dev_attr_power2_cap.dev_attr.attr,
	&sensor_dev_attr_power2_label.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* under multi-vf mode, the hwmon attributes are all not supported */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	/* there is no fan under pp one vf mode */
	if (amdgpu_sriov_is_pp_one_vf(adev) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip crit temp on APU */
	if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
			effective_mode &= ~S_IWUSR;
	}

	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) &&
	      (adev->asic_type != CHIP_VANGOGH))) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) &&
	      (adev->asic_type < CHIP_RENOIR))) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
			return 0;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
			return 0;
	}

	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) ||
	     adev->asic_type < CHIP_VEGA10) &&
	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
		return 0;

	/* only Vangogh has fast PPT limit and power labels */
	if ((adev->asic_type != CHIP_VANGOGH) &&
	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_label.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;
	uint32_t mask = 0;

	if (adev->pm.sysfs_initialized)
		return 0;
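
	/*
	 * Without DPM there are no power states or thermal/fan controls to
	 * expose, so skip creating the sysfs and hwmon nodes entirely.
	 */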
	if (adev->pm.dpm_enabled == 0)
		return 0;

	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask,
					       &adev->pm.pm_attr_list);
	if (ret)
		return ret;

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
	uint16_t *p_val;
	uint32_t size;
	int i;

	if (is_support_cclk_dpm(adev)) {
		p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

		size = sizeof(uint16_t) * adev->smu.cpu_core_num;
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
			for (i = 0; i < adev->smu.cpu_core_num; i++)
				seq_printf(m, "\t%u MHz (CPU%d)\n",
					   *(p_val + i), i);
		}

		kfree(p_val);
	}
}

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64 = 0;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");

	amdgpu_debugfs_prints_cpu_info(m, adev);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}

static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;
"On" : "Off"); 3334 } 3335 3336 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) 3337 { 3338 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 3339 struct drm_device *dev = adev_to_drm(adev); 3340 u32 flags = 0; 3341 int r; 3342 3343 if (amdgpu_in_reset(adev)) 3344 return -EPERM; 3345 3346 r = pm_runtime_get_sync(dev->dev); 3347 if (r < 0) { 3348 pm_runtime_put_autosuspend(dev->dev); 3349 return r; 3350 } 3351 3352 if (!adev->pm.dpm_enabled) { 3353 seq_printf(m, "dpm not enabled\n"); 3354 pm_runtime_mark_last_busy(dev->dev); 3355 pm_runtime_put_autosuspend(dev->dev); 3356 return 0; 3357 } 3358 3359 if (!is_support_sw_smu(adev) && 3360 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3361 mutex_lock(&adev->pm.mutex); 3362 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 3363 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 3364 else 3365 seq_printf(m, "Debugfs support not implemented for this asic\n"); 3366 mutex_unlock(&adev->pm.mutex); 3367 r = 0; 3368 } else { 3369 r = amdgpu_debugfs_pm_info_pp(m, adev); 3370 } 3371 if (r) 3372 goto out; 3373 3374 amdgpu_device_ip_get_clockgating_state(adev, &flags); 3375 3376 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 3377 amdgpu_parse_cg_state(m, flags); 3378 seq_printf(m, "\n"); 3379 3380 out: 3381 pm_runtime_mark_last_busy(dev->dev); 3382 pm_runtime_put_autosuspend(dev->dev); 3383 3384 return r; 3385 } 3386 3387 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info); 3388 3389 #endif 3390 3391 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 3392 { 3393 #if defined(CONFIG_DEBUG_FS) 3394 struct drm_minor *minor = adev_to_drm(adev)->primary; 3395 struct dentry *root = minor->debugfs_root; 3396 3397 debugfs_create_file("amdgpu_pm_info", 0444, root, adev, 3398 &amdgpu_debugfs_pm_info_fops); 3399 3400 #endif 3401 } 3402