/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM"
};

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. It is part of the sysfs API the amdgpu driver
 * provides for adjusting certain power related parameters. It accepts the
 * following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation.  Selecting battery switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation.  Selecting balanced switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation.  Selecting performance switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
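 * A minimal interaction sketch from userspace (the sysfs path is an
 * assumption; on a single-GPU system the file typically lives under
 * /sys/class/drm/card0/device):
 *
 * .. code-block:: bash
 *
 *	# read the currently selected state
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	# request the battery state (a no-op on newer GPUs)
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 *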
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_get_current_power_state(adev, &pm);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_set_power_state(adev, state);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters.  The file power_dpm_force_performance_level is
 * used for this.  It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for the current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When one of the profiling modes is selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do not
 * want clock gating, power gating, or clock fluctuations to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic.  profile_min_sclk forces the sclk
 * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
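 * A usage sketch (the sysfs path is an assumption, as above):
 *
 * .. code-block:: bash
 *
 *	# hand clock control over to the user
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	# later, return control to the driver
 *	echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *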
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	level = amdgpu_dpm_get_performance_level(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
		return -EINVAL;
	}
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (amdgpu_dpm_get_pp_num_states(adev, &data))
		memset(&data, 0, sizeof(data));

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_get_current_power_state(adev, &pm);

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return ret;

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->pm.pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	adev->pm.pp_force_state_enabled = false;

	if (strlen(buf) == 1)
		return count;

	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
		return -EINVAL;

	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	if (ret)
		goto err_out;

	state = data.states[idx];

	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		if (ret)
			goto err_out;

		adev->pm.pp_force_state_enabled = true;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;

err_out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return ret;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables.  The file pp_table is used for this.  Reading the file
 * will dump the current powerplay table.  Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
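 * A usage sketch (paths are assumptions):
 *
 * .. code-block:: bash
 *
 *	# save the active powerplay table
 *	cat /sys/class/drm/card0/device/pp_table > pp_table.bin
 *	# upload a modified table
 *	cat modified_pp_table.bin > /sys/class/drm/card0/device/pp_table
 *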
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	size = amdgpu_dpm_get_pp_table(adev, &table);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (size <= 0)
		return size;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return ret;

	return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state.  The file pp_od_clk_voltage is
 * used for this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV.  When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes.  If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
 *   Cavefish. For these ASICs, the target voltage calculation can be
 *   illustrated by "voltage = voltage calculated from v/f curve +
 *   overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz
 *   and "m 1 800" will update the maximum mclk to be 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 *   For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update the first point with the clock set to 300 MHz and the
 *   voltage to 600 mV. "vc 2 1000 1000" will update the third point
 *   with the clock set to 1000 MHz and the voltage to 1000 mV.
 *
 *   To update the voltage offset applied for gfxclk/voltage calculation,
 *   enter the new value by writing a string that contains "vo offset".
 *   This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
 *   The offset can be a positive or negative value.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
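 * A complete overdrive session on a Vega20-class part might look like
 * the sketch below (the path and values are assumptions, not
 * recommendations):
 *
 * .. code-block:: bash
 *
 *	cd /sys/class/drm/card0/device
 *	echo manual > power_dpm_force_performance_level
 *	# raise the maximum sclk to 2000 MHz
 *	echo "s 1 2000" > pp_od_clk_voltage
 *	# commit the edited table
 *	echo "c" > pp_od_clk_voltage
 *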
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count + 1);

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	     (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
					      type,
					      parameter,
					      parameter_size))
		goto err_out;

	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
					  parameter, parameter_size))
		goto err_out;

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (amdgpu_dpm_dispatch_task(adev,
					     AMD_PP_TASK_READJUST_POWER_STATE,
					     NULL))
			goto err_out;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;

err_out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret;
	enum pp_clock_type od_clocks[6] = {
		OD_SCLK,
		OD_MCLK,
		OD_VDDC_CURVE,
		OD_RANGE,
		OD_VDDGFX_OFFSET,
		OD_CCLK,
	};
	uint clk_index;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	for (clk_index = 0; clk_index < 6; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
		if (ret)
			break;
	}
	if (ret == -ENOENT) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		if (size > 0) {
			size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
			size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
			size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
		}
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit from the original ppfeature masks and input the
 * new ppfeature masks.
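 *
 * For example, a sketch of disabling the feature behind bit 10,
 * assuming the current mask reads 0xffffffffffffffff (the path and
 * bit position are illustrative):
 *
 * .. code-block:: bash
 *
 *	cd /sys/class/drm/card0/device
 *	cat pp_features
 *	# clear bit 10 (0x400) and write back the new mask
 *	echo 0xfffffffffffffbff > pp_features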
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a string that
 * contains a space-separated list of level indices to the file.
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
	if (ret == -ENOENT)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

/*
 * Parse a space-separated list of level indices (each 0-31) into a
 * bitmask, e.g. "4 5 6" yields a mask of 0x70.
 */
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_force_clock_level(adev, type, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state.  The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level.  Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics.  To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter.  Due to differences across asic families
 * the heuristic parameters vary from family to family.
 *
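 * A selection sketch (profile numbers come from reading the file
 * first; the path and the number used here are assumptions):
 *
 * .. code-block:: bash
 *
 *	cd /sys/class/drm/card0/device
 *	echo manual > power_dpm_force_performance_level
 *	cat pp_power_profile_mode       # list profiles and their numbers
 *	echo 1 > pp_power_profile_mode  # select the profile listed as number 1
 *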
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count - i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage.  The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
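 *
 * A read sketch (the path is an assumption):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_busy_percent   # e.g. prints "42"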
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage.  The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
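 *
 * The file prints "count0 count1 mps". An upper-bound estimate of the
 * bandwidth used in that second is therefore (count0 + count1) * mps
 * bytes. A quick sketch (the path is an assumption):
 *
 * .. code-block:: bash
 *
 *	awk '{ printf "~%d bytes/s\n", ($1 + $2) * $3 }' \
 *		/sys/class/drm/card0/device/pcie_bw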
 */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%llu %llu %i\n",
			  count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for
 * the GPU.  The file unique_id is used for this.  This will provide a
 * unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);

	return 0;
}

/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
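 *
 * For example (the path is an assumption):
 *
 * .. code-block:: bash
 *
 *	cd /sys/class/drm/card0/device
 *	echo 60 > thermal_throttling_logging  # log at most once a minute
 *	echo 0 > thermal_throttling_logging   # disable throttling logging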
 */
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			  adev_to_drm(adev)->unique,
			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			  adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * The data includes temperature, frequency, engine utilization, power
 * consumption, throttler status, fan speed and CPU core statistics
 * (available for APUs only). That is, it gives a snapshot of all
 * sensors at the same time.
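 *
 * The dump is a binary structure, so a raw read sketch would be
 * (the path is an assumption):
 *
 * .. code-block:: bash
 *
 *	hexdump -C /sys/class/drm/card0/device/gpu_metrics | head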
 */
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: smartshift_apu_power
 *
 * The amdgpu driver provides a sysfs API for reporting the APU power
 * share if it supports smartshift. The value is expressed as the
 * proportion of the STAPM limit, where the STAPM limit is the total
 * APU power limit, in percent. If APU power is 130% of the STAPM
 * limit, the APU is using 30% of the dGPU's headroom.
 */

static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
					       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power, size;
	int r = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
				   (void *)&ss_power, &size);
	if (r)
		goto out;

	r = sysfs_emit(buf, "%u%%\n", ss_power);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

/**
 * DOC: smartshift_dgpu_power
 *
 * The amdgpu driver provides a sysfs API for reporting the dGPU power
 * share if the device is in HG and supports smartshift. The value
 * is expressed as the proportion of the STAPM limit, where the STAPM
 * limit is the total APU power limit, in percent. If dGPU power is
 * 20% higher than the STAPM power (120%), it is using 20% of the
 * APU's power headroom.
 */

static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power, size;
	int r = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
				   (void *)&ss_power, &size);

	if (r)
		goto out;

	r = sysfs_emit(buf, "%u%%\n", ss_power);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

1821 /**
1822  * DOC: smartshift_bias
1823  *
1824  * The amdgpu driver provides a sysfs API for reporting the
 * SmartShift (SS2.0) bias level. The value ranges from -100 to 100
 * with a default of 0: -100 sets maximum preference to the APU and
 * 100 sets maximum preference to the dGPU; a write sketch follows
 * this comment.
1828  */
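/*
 * A minimal write sketch (card0 path assumed); out-of-range values
 * are clamped to [-100, 100] by the driver:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           FILE *f = fopen(
 *                   "/sys/class/drm/card0/device/smartshift_bias", "w");
 *
 *           if (!f)
 *                   return 1;
 *           fprintf(f, "%d", -100);  // maximum preference to the APU
 *           fclose(f);
 *           return 0;
 *   }
 */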
1829 
1830 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1831 					  struct device_attribute *attr,
1832 					  char *buf)
1833 {
1834 	int r = 0;
1835 
1836 	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1837 
1838 	return r;
1839 }
1840 
1841 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1842 					  struct device_attribute *attr,
1843 					  const char *buf, size_t count)
1844 {
1845 	struct drm_device *ddev = dev_get_drvdata(dev);
1846 	struct amdgpu_device *adev = drm_to_adev(ddev);
1847 	int r = 0;
1848 	int bias = 0;
1849 
1850 	if (amdgpu_in_reset(adev))
1851 		return -EPERM;
1852 	if (adev->in_suspend && !adev->in_runpm)
1853 		return -EPERM;
1854 
1855 	r = pm_runtime_get_sync(ddev->dev);
1856 	if (r < 0) {
1857 		pm_runtime_put_autosuspend(ddev->dev);
1858 		return r;
1859 	}
1860 
1861 	r = kstrtoint(buf, 10, &bias);
1862 	if (r)
1863 		goto out;
1864 
1865 	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1866 		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1867 	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1868 		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1869 
1870 	amdgpu_smartshift_bias = bias;
1871 	r = count;
1872 
1873 	/* TODO: update bias level with SMU message */
1874 
1875 out:
1876 	pm_runtime_mark_last_busy(ddev->dev);
1877 	pm_runtime_put_autosuspend(ddev->dev);
1878 	return r;
1879 }
1880 
1881 
1882 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1883 				uint32_t mask, enum amdgpu_device_attr_states *states)
1884 {
	uint32_t ss_power, size = sizeof(ss_power);
1886 
1887 	if (!amdgpu_acpi_is_power_shift_control_supported())
1888 		*states = ATTR_STATE_UNSUPPORTED;
1889 	else if ((adev->flags & AMD_IS_PX) &&
1890 		 !amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1891 		*states = ATTR_STATE_UNSUPPORTED;
1892 	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1893 		 (void *)&ss_power, &size))
1894 		*states = ATTR_STATE_UNSUPPORTED;
1895 	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1896 		 (void *)&ss_power, &size))
1897 		*states = ATTR_STATE_UNSUPPORTED;
1898 
1899 	return 0;
1900 }
1901 
1902 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1903 			       uint32_t mask, enum amdgpu_device_attr_states *states)
1904 {
	uint32_t ss_power, size = sizeof(ss_power);
1906 
1907 	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1908 		*states = ATTR_STATE_UNSUPPORTED;
1909 	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1910 		 (void *)&ss_power, &size))
1911 		*states = ATTR_STATE_UNSUPPORTED;
1912 	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1913 		 (void *)&ss_power, &size))
1914 		*states = ATTR_STATE_UNSUPPORTED;
1915 
1916 	return 0;
1917 }
1918 
1919 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
1920 	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1921 	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1922 	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1923 	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1924 	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1925 	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1926 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1927 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1928 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1929 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1930 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1931 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1932 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1933 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1934 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
1935 	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
1936 	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1937 	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
1938 	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1939 	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1940 	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
1941 	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1942 	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1943 	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1944 	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1945 	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,			ATTR_FLAG_BASIC,
1946 			      .attr_update = ss_power_attr_update),
1947 	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,			ATTR_FLAG_BASIC,
1948 			      .attr_update = ss_power_attr_update),
1949 	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
1950 			      .attr_update = ss_bias_attr_update),
1951 };
1952 
1953 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1954 			       uint32_t mask, enum amdgpu_device_attr_states *states)
1955 {
1956 	struct device_attribute *dev_attr = &attr->dev_attr;
1957 	const char *attr_name = dev_attr->attr.name;
1958 	enum amd_asic_type asic_type = adev->asic_type;
1959 
1960 	if (!(attr->flags & mask)) {
1961 		*states = ATTR_STATE_UNSUPPORTED;
1962 		return 0;
1963 	}
1964 
1965 #define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
1966 
1967 	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1968 		if (asic_type < CHIP_VEGA10)
1969 			*states = ATTR_STATE_UNSUPPORTED;
1970 	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
1971 		if (asic_type < CHIP_VEGA10 ||
1972 		    asic_type == CHIP_ARCTURUS ||
1973 		    asic_type == CHIP_ALDEBARAN)
1974 			*states = ATTR_STATE_UNSUPPORTED;
1975 	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1976 		if (asic_type < CHIP_VEGA20)
1977 			*states = ATTR_STATE_UNSUPPORTED;
1978 	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
1979 		*states = ATTR_STATE_UNSUPPORTED;
1980 		if (amdgpu_dpm_is_overdrive_supported(adev))
1981 			*states = ATTR_STATE_SUPPORTED;
1982 	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
1983 		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
1984 			*states = ATTR_STATE_UNSUPPORTED;
1985 	} else if (DEVICE_ATTR_IS(pcie_bw)) {
1986 		/* PCIe Perf counters won't work on APU nodes */
1987 		if (adev->flags & AMD_IS_APU)
1988 			*states = ATTR_STATE_UNSUPPORTED;
1989 	} else if (DEVICE_ATTR_IS(unique_id)) {
1990 		if (asic_type != CHIP_VEGA10 &&
1991 		    asic_type != CHIP_VEGA20 &&
1992 		    asic_type != CHIP_ARCTURUS &&
1993 		    asic_type != CHIP_ALDEBARAN)
1994 			*states = ATTR_STATE_UNSUPPORTED;
1995 	} else if (DEVICE_ATTR_IS(pp_features)) {
1996 		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
1997 			*states = ATTR_STATE_UNSUPPORTED;
1998 	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
1999 		if (asic_type < CHIP_VEGA12)
2000 			*states = ATTR_STATE_UNSUPPORTED;
2001 	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2002 		if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
2003 			*states = ATTR_STATE_UNSUPPORTED;
2004 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2005 		if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
2006 			*states = ATTR_STATE_UNSUPPORTED;
2007 	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2008 		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2009 			*states = ATTR_STATE_UNSUPPORTED;
2010 	}
2011 
2012 	switch (asic_type) {
2013 	case CHIP_ARCTURUS:
2014 	case CHIP_ALDEBARAN:
		/* the MI series cards do not support standalone mclk/socclk/fclk level setting */
2016 		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2017 		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
2018 		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
2019 			dev_attr->attr.mode &= ~S_IWUGO;
2020 			dev_attr->store = NULL;
2021 		}
2022 		break;
2023 	default:
2024 		break;
2025 	}
2026 
2027 	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2028 		/* SMU MP1 does not support dcefclk level setting */
2029 		if (asic_type >= CHIP_NAVI10) {
2030 			dev_attr->attr.mode &= ~S_IWUGO;
2031 			dev_attr->store = NULL;
2032 		}
2033 	}
2034 
2035 	/* setting should not be allowed from VF if not in one VF mode */
2036 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2037 		dev_attr->attr.mode &= ~S_IWUGO;
2038 		dev_attr->store = NULL;
2039 	}
2040 
2041 #undef DEVICE_ATTR_IS
2042 
2043 	return 0;
2044 }
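
/*
 * The update-callback contract used above: each amdgpu_device_attr may
 * carry an attr_update hook, and amdgpu_device_attr_create() below
 * invokes it (or default_attr_update()) once per attribute to mark the
 * attribute unsupported or strip write access. A hypothetical new
 * attribute would be wired up the same way as the smartshift entries:
 *
 *   static int my_attr_update(struct amdgpu_device *adev,
 *                             struct amdgpu_device_attr *attr,
 *                             uint32_t mask,
 *                             enum amdgpu_device_attr_states *states)
 *   {
 *           if (!(attr->flags & mask))
 *                   *states = ATTR_STATE_UNSUPPORTED;
 *           return 0;
 *   }
 *
 *   AMDGPU_DEVICE_ATTR_RO(my_attr, ATTR_FLAG_BASIC,
 *                         .attr_update = my_attr_update),
 */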
2045 
2046 
2047 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2048 				     struct amdgpu_device_attr *attr,
2049 				     uint32_t mask, struct list_head *attr_list)
2050 {
2051 	int ret = 0;
2052 	struct device_attribute *dev_attr = &attr->dev_attr;
2053 	const char *name = dev_attr->attr.name;
2054 	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2055 	struct amdgpu_device_attr_entry *attr_entry;
2056 
2057 	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2058 			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2059 
2060 	BUG_ON(!attr);
2061 
2062 	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2063 
2064 	ret = attr_update(adev, attr, mask, &attr_states);
2065 	if (ret) {
2066 		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2067 			name, ret);
2068 		return ret;
2069 	}
2070 
2071 	if (attr_states == ATTR_STATE_UNSUPPORTED)
2072 		return 0;
2073 
	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
		return ret;
	}
2079 
2080 	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2081 	if (!attr_entry)
2082 		return -ENOMEM;
2083 
2084 	attr_entry->attr = attr;
2085 	INIT_LIST_HEAD(&attr_entry->entry);
2086 
2087 	list_add_tail(&attr_entry->entry, attr_list);
2088 
2089 	return ret;
2090 }
2091 
2092 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2093 {
2094 	struct device_attribute *dev_attr = &attr->dev_attr;
2095 
2096 	device_remove_file(adev->dev, dev_attr);
2097 }
2098 
2099 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2100 					     struct list_head *attr_list);
2101 
2102 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2103 					    struct amdgpu_device_attr *attrs,
2104 					    uint32_t counts,
2105 					    uint32_t mask,
2106 					    struct list_head *attr_list)
2107 {
2108 	int ret = 0;
2109 	uint32_t i = 0;
2110 
2111 	for (i = 0; i < counts; i++) {
2112 		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2113 		if (ret)
2114 			goto failed;
2115 	}
2116 
2117 	return 0;
2118 
2119 failed:
2120 	amdgpu_device_attr_remove_groups(adev, attr_list);
2121 
2122 	return ret;
2123 }
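
/*
 * Creation is all-or-nothing: a failure part-way through unwinds every
 * attribute created so far via attr_list before returning the error.
 */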
2124 
2125 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2126 					     struct list_head *attr_list)
2127 {
2128 	struct amdgpu_device_attr_entry *entry, *entry_tmp;
2129 
2130 	if (list_empty(attr_list))
		return;
2132 
2133 	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2134 		amdgpu_device_attr_remove(adev, entry->attr);
2135 		list_del(&entry->entry);
2136 		kfree(entry);
2137 	}
2138 }
2139 
2140 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2141 				      struct device_attribute *attr,
2142 				      char *buf)
2143 {
2144 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2145 	int channel = to_sensor_dev_attr(attr)->index;
2146 	int r, temp = 0, size = sizeof(temp);
2147 
2148 	if (amdgpu_in_reset(adev))
2149 		return -EPERM;
2150 	if (adev->in_suspend && !adev->in_runpm)
2151 		return -EPERM;
2152 
2153 	if (channel >= PP_TEMP_MAX)
2154 		return -EINVAL;
2155 
2156 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2157 	if (r < 0) {
2158 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2159 		return r;
2160 	}
2161 
2162 	switch (channel) {
2163 	case PP_TEMP_JUNCTION:
2164 		/* get current junction temperature */
2165 		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2166 					   (void *)&temp, &size);
2167 		break;
2168 	case PP_TEMP_EDGE:
2169 		/* get current edge temperature */
2170 		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2171 					   (void *)&temp, &size);
2172 		break;
2173 	case PP_TEMP_MEM:
2174 		/* get current memory temperature */
2175 		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2176 					   (void *)&temp, &size);
2177 		break;
2178 	default:
2179 		r = -EINVAL;
2180 		break;
2181 	}
2182 
2183 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2184 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2185 
2186 	if (r)
2187 		return r;
2188 
2189 	return sysfs_emit(buf, "%d\n", temp);
2190 }
2191 
2192 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2193 					     struct device_attribute *attr,
2194 					     char *buf)
2195 {
2196 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2197 	int hyst = to_sensor_dev_attr(attr)->index;
2198 	int temp;
2199 
2200 	if (hyst)
2201 		temp = adev->pm.dpm.thermal.min_temp;
2202 	else
2203 		temp = adev->pm.dpm.thermal.max_temp;
2204 
2205 	return sysfs_emit(buf, "%d\n", temp);
2206 }
2207 
2208 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2209 					     struct device_attribute *attr,
2210 					     char *buf)
2211 {
2212 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2213 	int hyst = to_sensor_dev_attr(attr)->index;
2214 	int temp;
2215 
2216 	if (hyst)
2217 		temp = adev->pm.dpm.thermal.min_hotspot_temp;
2218 	else
2219 		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2220 
2221 	return sysfs_emit(buf, "%d\n", temp);
2222 }
2223 
2224 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2225 					     struct device_attribute *attr,
2226 					     char *buf)
2227 {
2228 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2229 	int hyst = to_sensor_dev_attr(attr)->index;
2230 	int temp;
2231 
2232 	if (hyst)
2233 		temp = adev->pm.dpm.thermal.min_mem_temp;
2234 	else
2235 		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2236 
2237 	return sysfs_emit(buf, "%d\n", temp);
2238 }
2239 
2240 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2241 					     struct device_attribute *attr,
2242 					     char *buf)
2243 {
2244 	int channel = to_sensor_dev_attr(attr)->index;
2245 
2246 	if (channel >= PP_TEMP_MAX)
2247 		return -EINVAL;
2248 
2249 	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2250 }
2251 
2252 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2253 					     struct device_attribute *attr,
2254 					     char *buf)
2255 {
2256 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2257 	int channel = to_sensor_dev_attr(attr)->index;
2258 	int temp = 0;
2259 
2260 	if (channel >= PP_TEMP_MAX)
2261 		return -EINVAL;
2262 
2263 	switch (channel) {
2264 	case PP_TEMP_JUNCTION:
2265 		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2266 		break;
2267 	case PP_TEMP_EDGE:
2268 		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2269 		break;
2270 	case PP_TEMP_MEM:
2271 		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2272 		break;
2273 	}
2274 
2275 	return sysfs_emit(buf, "%d\n", temp);
2276 }
2277 
2278 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2279 					    struct device_attribute *attr,
2280 					    char *buf)
2281 {
2282 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2283 	u32 pwm_mode = 0;
2284 	int ret;
2285 
2286 	if (amdgpu_in_reset(adev))
2287 		return -EPERM;
2288 	if (adev->in_suspend && !adev->in_runpm)
2289 		return -EPERM;
2290 
2291 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2292 	if (ret < 0) {
2293 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2294 		return ret;
2295 	}
2296 
2297 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2298 
2299 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2300 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2301 
2302 	if (ret)
2303 		return -EINVAL;
2304 
2305 	return sysfs_emit(buf, "%u\n", pwm_mode);
2306 }
2307 
2308 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2309 					    struct device_attribute *attr,
2310 					    const char *buf,
2311 					    size_t count)
2312 {
2313 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2314 	int err, ret;
2315 	int value;
2316 
2317 	if (amdgpu_in_reset(adev))
2318 		return -EPERM;
2319 	if (adev->in_suspend && !adev->in_runpm)
2320 		return -EPERM;
2321 
2322 	err = kstrtoint(buf, 10, &value);
2323 	if (err)
2324 		return err;
2325 
2326 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2327 	if (ret < 0) {
2328 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2329 		return ret;
2330 	}
2331 
2332 	ret = amdgpu_dpm_set_fan_control_mode(adev, value);
2333 
2334 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2335 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2336 
2337 	if (ret)
2338 		return -EINVAL;
2339 
2340 	return count;
2341 }
2342 
2343 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2344 					 struct device_attribute *attr,
2345 					 char *buf)
2346 {
2347 	return sysfs_emit(buf, "%i\n", 0);
2348 }
2349 
2350 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2351 					 struct device_attribute *attr,
2352 					 char *buf)
2353 {
2354 	return sysfs_emit(buf, "%i\n", 255);
2355 }
2356 
2357 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2358 				     struct device_attribute *attr,
2359 				     const char *buf, size_t count)
2360 {
2361 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2362 	int err;
2363 	u32 value;
2364 	u32 pwm_mode;
2365 
2366 	if (amdgpu_in_reset(adev))
2367 		return -EPERM;
2368 	if (adev->in_suspend && !adev->in_runpm)
2369 		return -EPERM;
2370 
2371 	err = kstrtou32(buf, 10, &value);
2372 	if (err)
2373 		return err;
2374 
2375 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2376 	if (err < 0) {
2377 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2378 		return err;
2379 	}
2380 
2381 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2382 	if (err)
2383 		goto out;
2384 
2385 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2386 		pr_info("manual fan speed control should be enabled first\n");
2387 		err = -EINVAL;
2388 		goto out;
2389 	}
2390 
2391 	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2392 
2393 out:
2394 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2395 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2396 
2397 	if (err)
2398 		return err;
2399 
2400 	return count;
2401 }
2402 
2403 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2404 				     struct device_attribute *attr,
2405 				     char *buf)
2406 {
2407 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2408 	int err;
2409 	u32 speed = 0;
2410 
2411 	if (amdgpu_in_reset(adev))
2412 		return -EPERM;
2413 	if (adev->in_suspend && !adev->in_runpm)
2414 		return -EPERM;
2415 
2416 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2417 	if (err < 0) {
2418 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2419 		return err;
2420 	}
2421 
2422 	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2423 
2424 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2425 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2426 
2427 	if (err)
2428 		return err;
2429 
2430 	return sysfs_emit(buf, "%i\n", speed);
2431 }
2432 
2433 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2434 					   struct device_attribute *attr,
2435 					   char *buf)
2436 {
2437 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2438 	int err;
2439 	u32 speed = 0;
2440 
2441 	if (amdgpu_in_reset(adev))
2442 		return -EPERM;
2443 	if (adev->in_suspend && !adev->in_runpm)
2444 		return -EPERM;
2445 
2446 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2447 	if (err < 0) {
2448 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2449 		return err;
2450 	}
2451 
2452 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2453 
2454 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2455 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2456 
2457 	if (err)
2458 		return err;
2459 
2460 	return sysfs_emit(buf, "%i\n", speed);
2461 }
2462 
2463 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2464 					 struct device_attribute *attr,
2465 					 char *buf)
2466 {
2467 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2468 	u32 min_rpm = 0;
2469 	u32 size = sizeof(min_rpm);
2470 	int r;
2471 
2472 	if (amdgpu_in_reset(adev))
2473 		return -EPERM;
2474 	if (adev->in_suspend && !adev->in_runpm)
2475 		return -EPERM;
2476 
2477 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2478 	if (r < 0) {
2479 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2480 		return r;
2481 	}
2482 
2483 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2484 				   (void *)&min_rpm, &size);
2485 
2486 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2487 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2488 
2489 	if (r)
2490 		return r;
2491 
2492 	return sysfs_emit(buf, "%d\n", min_rpm);
2493 }
2494 
2495 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2496 					 struct device_attribute *attr,
2497 					 char *buf)
2498 {
2499 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2500 	u32 max_rpm = 0;
2501 	u32 size = sizeof(max_rpm);
2502 	int r;
2503 
2504 	if (amdgpu_in_reset(adev))
2505 		return -EPERM;
2506 	if (adev->in_suspend && !adev->in_runpm)
2507 		return -EPERM;
2508 
2509 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2510 	if (r < 0) {
2511 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2512 		return r;
2513 	}
2514 
2515 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2516 				   (void *)&max_rpm, &size);
2517 
2518 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2519 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2520 
2521 	if (r)
2522 		return r;
2523 
2524 	return sysfs_emit(buf, "%d\n", max_rpm);
2525 }
2526 
2527 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2528 					   struct device_attribute *attr,
2529 					   char *buf)
2530 {
2531 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2532 	int err;
2533 	u32 rpm = 0;
2534 
2535 	if (amdgpu_in_reset(adev))
2536 		return -EPERM;
2537 	if (adev->in_suspend && !adev->in_runpm)
2538 		return -EPERM;
2539 
2540 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2541 	if (err < 0) {
2542 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2543 		return err;
2544 	}
2545 
2546 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2547 
2548 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2549 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2550 
2551 	if (err)
2552 		return err;
2553 
2554 	return sysfs_emit(buf, "%i\n", rpm);
2555 }
2556 
2557 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2558 				     struct device_attribute *attr,
2559 				     const char *buf, size_t count)
2560 {
2561 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2562 	int err;
2563 	u32 value;
2564 	u32 pwm_mode;
2565 
2566 	if (amdgpu_in_reset(adev))
2567 		return -EPERM;
2568 	if (adev->in_suspend && !adev->in_runpm)
2569 		return -EPERM;
2570 
2571 	err = kstrtou32(buf, 10, &value);
2572 	if (err)
2573 		return err;
2574 
2575 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2576 	if (err < 0) {
2577 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2578 		return err;
2579 	}
2580 
2581 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2582 	if (err)
2583 		goto out;
2584 
2585 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2586 		err = -ENODATA;
2587 		goto out;
2588 	}
2589 
2590 	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2591 
2592 out:
2593 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2594 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2595 
2596 	if (err)
2597 		return err;
2598 
2599 	return count;
2600 }
2601 
2602 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2603 					    struct device_attribute *attr,
2604 					    char *buf)
2605 {
2606 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2607 	u32 pwm_mode = 0;
2608 	int ret;
2609 
2610 	if (amdgpu_in_reset(adev))
2611 		return -EPERM;
2612 	if (adev->in_suspend && !adev->in_runpm)
2613 		return -EPERM;
2614 
2615 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2616 	if (ret < 0) {
2617 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2618 		return ret;
2619 	}
2620 
2621 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2622 
2623 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2624 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2625 
2626 	if (ret)
2627 		return -EINVAL;
2628 
2629 	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2630 }
2631 
2632 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2633 					    struct device_attribute *attr,
2634 					    const char *buf,
2635 					    size_t count)
2636 {
2637 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2638 	int err;
2639 	int value;
2640 	u32 pwm_mode;
2641 
2642 	if (amdgpu_in_reset(adev))
2643 		return -EPERM;
2644 	if (adev->in_suspend && !adev->in_runpm)
2645 		return -EPERM;
2646 
2647 	err = kstrtoint(buf, 10, &value);
2648 	if (err)
2649 		return err;
2650 
2651 	if (value == 0)
2652 		pwm_mode = AMD_FAN_CTRL_AUTO;
2653 	else if (value == 1)
2654 		pwm_mode = AMD_FAN_CTRL_MANUAL;
2655 	else
2656 		return -EINVAL;
2657 
2658 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2659 	if (err < 0) {
2660 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2661 		return err;
2662 	}
2663 
2664 	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2665 
2666 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2667 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2668 
2669 	if (err)
2670 		return -EINVAL;
2671 
2672 	return count;
2673 }
2674 
2675 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2676 					struct device_attribute *attr,
2677 					char *buf)
2678 {
2679 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2680 	u32 vddgfx;
2681 	int r, size = sizeof(vddgfx);
2682 
2683 	if (amdgpu_in_reset(adev))
2684 		return -EPERM;
2685 	if (adev->in_suspend && !adev->in_runpm)
2686 		return -EPERM;
2687 
2688 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2689 	if (r < 0) {
2690 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2691 		return r;
2692 	}
2693 
2694 	/* get the voltage */
2695 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2696 				   (void *)&vddgfx, &size);
2697 
2698 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2699 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2700 
2701 	if (r)
2702 		return r;
2703 
2704 	return sysfs_emit(buf, "%d\n", vddgfx);
2705 }
2706 
2707 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2708 					      struct device_attribute *attr,
2709 					      char *buf)
2710 {
2711 	return sysfs_emit(buf, "vddgfx\n");
2712 }
2713 
2714 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2715 				       struct device_attribute *attr,
2716 				       char *buf)
2717 {
2718 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2719 	u32 vddnb;
2720 	int r, size = sizeof(vddnb);
2721 
2722 	if (amdgpu_in_reset(adev))
2723 		return -EPERM;
2724 	if (adev->in_suspend && !adev->in_runpm)
2725 		return -EPERM;
2726 
2727 	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
2729 		return -EINVAL;
2730 
2731 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2732 	if (r < 0) {
2733 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2734 		return r;
2735 	}
2736 
2737 	/* get the voltage */
2738 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2739 				   (void *)&vddnb, &size);
2740 
2741 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2742 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2743 
2744 	if (r)
2745 		return r;
2746 
2747 	return sysfs_emit(buf, "%d\n", vddnb);
2748 }
2749 
2750 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2751 					      struct device_attribute *attr,
2752 					      char *buf)
2753 {
2754 	return sysfs_emit(buf, "vddnb\n");
2755 }
2756 
2757 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2758 					   struct device_attribute *attr,
2759 					   char *buf)
2760 {
2761 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2762 	u32 query = 0;
2763 	int r, size = sizeof(u32);
2764 	unsigned uw;
2765 
2766 	if (amdgpu_in_reset(adev))
2767 		return -EPERM;
2768 	if (adev->in_suspend && !adev->in_runpm)
2769 		return -EPERM;
2770 
2771 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2772 	if (r < 0) {
2773 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2774 		return r;
2775 	}
2776 
	/* get the power */
2778 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2779 				   (void *)&query, &size);
2780 
2781 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2782 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2783 
2784 	if (r)
2785 		return r;
2786 
	/* convert to microwatts: integer watts are in bits 31:8, and the
	 * fractional byte in bits 7:0 is scaled here as milliwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2789 
2790 	return sysfs_emit(buf, "%u\n", uw);
2791 }
2792 
2793 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2794 					 struct device_attribute *attr,
2795 					 char *buf)
2796 {
2797 	return sysfs_emit(buf, "%i\n", 0);
2798 }
2799 
2800 
2801 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2802 					struct device_attribute *attr,
2803 					char *buf,
2804 					enum pp_power_limit_level pp_limit_level)
2805 {
2806 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2807 	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2808 	uint32_t limit;
2809 	ssize_t size;
2810 	int r;
2811 
2812 	if (amdgpu_in_reset(adev))
2813 		return -EPERM;
2814 	if (adev->in_suspend && !adev->in_runpm)
2815 		return -EPERM;
2816 
2817 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2818 	if (r < 0) {
2819 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2820 		return r;
2821 	}
2822 
2823 	r = amdgpu_dpm_get_power_limit(adev, &limit,
2824 				      pp_limit_level, power_type);
2825 
2826 	if (!r)
2827 		size = sysfs_emit(buf, "%u\n", limit * 1000000);
2828 	else
2829 		size = sysfs_emit(buf, "\n");
2830 
2831 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2832 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2833 
2834 	return size;
2835 }
2836 
2837 
2838 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2839 					 struct device_attribute *attr,
2840 					 char *buf)
2841 {
2842 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
2843 
2844 }
2845 
2846 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2847 					 struct device_attribute *attr,
2848 					 char *buf)
2849 {
2850 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
2851 
2852 }
2853 
2854 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2855 					 struct device_attribute *attr,
2856 					 char *buf)
2857 {
2858 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
2859 
2860 }
2861 
2862 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2863 					 struct device_attribute *attr,
2864 					 char *buf)
2865 {
2866 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2867 
2868 	if (adev->asic_type == CHIP_VANGOGH)
2869 		return sysfs_emit(buf, "%s\n",
2870 				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
2871 				  "fastPPT" : "slowPPT");
2872 	else
2873 		return sysfs_emit(buf, "PPT\n");
2874 }
2875 
2876 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2877 		struct device_attribute *attr,
2878 		const char *buf,
2879 		size_t count)
2880 {
2881 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2882 	int limit_type = to_sensor_dev_attr(attr)->index;
2883 	int err;
2884 	u32 value;
2885 
2886 	if (amdgpu_in_reset(adev))
2887 		return -EPERM;
2888 	if (adev->in_suspend && !adev->in_runpm)
2889 		return -EPERM;
2890 
2891 	if (amdgpu_sriov_vf(adev))
2892 		return -EINVAL;
2893 
2894 	err = kstrtou32(buf, 10, &value);
2895 	if (err)
2896 		return err;
2897 
	value = value / 1000000; /* convert from microwatts to watts */
	value |= limit_type << 24; /* pack the limit type into bits 31:24 */
2900 
2901 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2902 	if (err < 0) {
2903 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2904 		return err;
2905 	}
2906 
2907 	err = amdgpu_dpm_set_power_limit(adev, value);
2908 
2909 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2910 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2911 
2912 	if (err)
2913 		return err;
2914 
2915 	return count;
2916 }
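
/*
 * The cap is written in microwatts; setting a hypothetical 220 W limit
 * (hwmon1 path assumed) looks like:
 *
 *   FILE *f = fopen("/sys/class/hwmon/hwmon1/power1_cap", "w");
 *
 *   if (f) {
 *           fprintf(f, "%u", 220000000);  // 220 W in microwatts
 *           fclose(f);
 *   }
 *
 * Internally the value is divided by 1000000 and the limit type from
 * the attribute index (1 selects the fast PPT limit on Vangogh) is
 * packed into bits 31:24 before being passed down to the SMU.
 */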
2917 
2918 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2919 				      struct device_attribute *attr,
2920 				      char *buf)
2921 {
2922 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2923 	uint32_t sclk;
2924 	int r, size = sizeof(sclk);
2925 
2926 	if (amdgpu_in_reset(adev))
2927 		return -EPERM;
2928 	if (adev->in_suspend && !adev->in_runpm)
2929 		return -EPERM;
2930 
2931 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2932 	if (r < 0) {
2933 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2934 		return r;
2935 	}
2936 
2937 	/* get the sclk */
2938 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2939 				   (void *)&sclk, &size);
2940 
2941 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2942 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2943 
2944 	if (r)
2945 		return r;
2946 
	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */
	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
2948 }
2949 
2950 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2951 					    struct device_attribute *attr,
2952 					    char *buf)
2953 {
2954 	return sysfs_emit(buf, "sclk\n");
2955 }
2956 
2957 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2958 				      struct device_attribute *attr,
2959 				      char *buf)
2960 {
2961 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2962 	uint32_t mclk;
2963 	int r, size = sizeof(mclk);
2964 
2965 	if (amdgpu_in_reset(adev))
2966 		return -EPERM;
2967 	if (adev->in_suspend && !adev->in_runpm)
2968 		return -EPERM;
2969 
2970 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2971 	if (r < 0) {
2972 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2973 		return r;
2974 	}
2975 
	/* get the mclk */
2977 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2978 				   (void *)&mclk, &size);
2979 
2980 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2981 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2982 
2983 	if (r)
2984 		return r;
2985 
	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */
	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
2987 }
2988 
2989 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
2990 					    struct device_attribute *attr,
2991 					    char *buf)
2992 {
2993 	return sysfs_emit(buf, "mclk\n");
2994 }
2995 
2996 /**
2997  * DOC: hwmon
2998  *
2999  * The amdgpu driver exposes the following sensor interfaces:
3000  *
3001  * - GPU temperature (via the on-die sensor)
3002  *
3003  * - GPU voltage
3004  *
3005  * - Northbridge voltage (APUs only)
3006  *
3007  * - GPU power
3008  *
3009  * - GPU fan
3010  *
3011  * - GPU gfx/compute engine clock
3012  *
3013  * - GPU memory clock (dGPU only)
3014  *
3015  * hwmon interfaces for GPU temperature:
3016  *
 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3018  *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3019  *
3020  * - temp[1-3]_label: temperature channel label
3021  *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3022  *
3023  * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3024  *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3025  *
3026  * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3027  *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3028  *
 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3030  *   - these are supported on SOC15 dGPUs only
3031  *
3032  * hwmon interfaces for GPU voltage:
3033  *
3034  * - in0_input: the voltage on the GPU in millivolts
3035  *
3036  * - in1_input: the voltage on the Northbridge in millivolts
3037  *
3038  * hwmon interfaces for GPU power:
3039  *
3040  * - power1_average: average power used by the GPU in microWatts
3041  *
3042  * - power1_cap_min: minimum cap supported in microWatts
3043  *
3044  * - power1_cap_max: maximum cap supported in microWatts
3045  *
3046  * - power1_cap: selected power cap in microWatts
3047  *
3048  * hwmon interfaces for GPU fan:
3049  *
3050  * - pwm1: pulse width modulation fan level (0-255)
3051  *
3052  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3053  *
3054  * - pwm1_min: pulse width modulation fan control minimum level (0)
3055  *
3056  * - pwm1_max: pulse width modulation fan control maximum level (255)
3057  *
 * - fan1_min: minimum fan speed in revolutions/min (RPM)
 *
 * - fan1_max: maximum fan speed in revolutions/min (RPM)
 *
 * - fan1_input: fan speed in RPM
 *
 * - fan[1-\*]_target: desired fan speed in revolutions/min (RPM)
 *
 * - fan[1-\*]_enable: enable or disable the sensors. 1: Enable, 0: Disable
 *
 * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces at the same time.
 *       Doing so will cause the former to be overridden.
3070  *
3071  * hwmon interfaces for GPU clocks:
3072  *
3073  * - freq1_input: the gfx/compute clock in hertz
3074  *
3075  * - freq2_input: the memory clock in hertz
3076  *
 * You can use hwmon tools like sensors(1) to view this information on
 * your system; a minimal read sketch follows this comment.
3078  *
3079  */
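
/*
 * A minimal read sketch (it assumes hwmon1 is the amdgpu instance; in
 * practice, match the hwmon "name" attribute first):
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           long mdeg;
 *           FILE *f = fopen("/sys/class/hwmon/hwmon1/temp1_input", "r");
 *
 *           if (!f)
 *                   return 1;
 *           if (fscanf(f, "%ld", &mdeg) != 1) {
 *                   fclose(f);
 *                   return 1;
 *           }
 *           fclose(f);
 *           printf("edge temperature: %ld.%03ld C\n",
 *                  mdeg / 1000, mdeg % 1000);
 *           return 0;
 *   }
 */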
3080 
3081 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3082 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3083 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3084 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3085 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3086 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3087 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3088 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3089 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3090 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3091 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3092 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3093 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3094 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3095 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3096 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3097 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3098 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3099 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3100 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3101 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3102 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3103 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3104 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3105 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3106 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3107 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3108 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3109 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3110 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3111 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3112 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3113 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3114 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3115 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3116 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3117 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3118 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3119 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3120 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3121 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3122 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3123 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3124 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3125 
3126 static struct attribute *hwmon_attributes[] = {
3127 	&sensor_dev_attr_temp1_input.dev_attr.attr,
3128 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
3129 	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3130 	&sensor_dev_attr_temp2_input.dev_attr.attr,
3131 	&sensor_dev_attr_temp2_crit.dev_attr.attr,
3132 	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3133 	&sensor_dev_attr_temp3_input.dev_attr.attr,
3134 	&sensor_dev_attr_temp3_crit.dev_attr.attr,
3135 	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3136 	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
3137 	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
3138 	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
3139 	&sensor_dev_attr_temp1_label.dev_attr.attr,
3140 	&sensor_dev_attr_temp2_label.dev_attr.attr,
3141 	&sensor_dev_attr_temp3_label.dev_attr.attr,
3142 	&sensor_dev_attr_pwm1.dev_attr.attr,
3143 	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
3144 	&sensor_dev_attr_pwm1_min.dev_attr.attr,
3145 	&sensor_dev_attr_pwm1_max.dev_attr.attr,
3146 	&sensor_dev_attr_fan1_input.dev_attr.attr,
3147 	&sensor_dev_attr_fan1_min.dev_attr.attr,
3148 	&sensor_dev_attr_fan1_max.dev_attr.attr,
3149 	&sensor_dev_attr_fan1_target.dev_attr.attr,
3150 	&sensor_dev_attr_fan1_enable.dev_attr.attr,
3151 	&sensor_dev_attr_in0_input.dev_attr.attr,
3152 	&sensor_dev_attr_in0_label.dev_attr.attr,
3153 	&sensor_dev_attr_in1_input.dev_attr.attr,
3154 	&sensor_dev_attr_in1_label.dev_attr.attr,
3155 	&sensor_dev_attr_power1_average.dev_attr.attr,
3156 	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
3157 	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
3158 	&sensor_dev_attr_power1_cap.dev_attr.attr,
3159 	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
3160 	&sensor_dev_attr_power1_label.dev_attr.attr,
3161 	&sensor_dev_attr_power2_average.dev_attr.attr,
3162 	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
3163 	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
3164 	&sensor_dev_attr_power2_cap.dev_attr.attr,
3165 	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
3166 	&sensor_dev_attr_power2_label.dev_attr.attr,
3167 	&sensor_dev_attr_freq1_input.dev_attr.attr,
3168 	&sensor_dev_attr_freq1_label.dev_attr.attr,
3169 	&sensor_dev_attr_freq2_input.dev_attr.attr,
3170 	&sensor_dev_attr_freq2_label.dev_attr.attr,
3171 	NULL
3172 };
3173 
3174 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3175 					struct attribute *attr, int index)
3176 {
3177 	struct device *dev = kobj_to_dev(kobj);
3178 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3179 	umode_t effective_mode = attr->mode;
3180 
3181 	/* under multi-vf mode, the hwmon attributes are all not supported */
3182 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3183 		return 0;
3184 
3185 	/* under pp one vf mode manage of hwmon attributes is not supported */
3186 	if (amdgpu_sriov_is_pp_one_vf(adev))
3187 		effective_mode &= ~S_IWUSR;
3188 
3189 	/* Skip fan attributes if fan is not present */
3190 	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3191 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3192 	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3193 	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3194 	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3195 	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3196 	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3197 	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3198 	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3199 		return 0;
3200 
3201 	/* Skip fan attributes on APU */
3202 	if ((adev->flags & AMD_IS_APU) &&
3203 	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3204 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3205 	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3206 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3207 	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3208 	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3209 	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3210 	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3211 	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3212 		return 0;
3213 
3214 	/* Skip crit temp on APU */
3215 	if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3216 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3217 	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3218 		return 0;
3219 
3220 	/* Skip limit attributes if DPM is not enabled */
3221 	if (!adev->pm.dpm_enabled &&
3222 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3223 	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3224 	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3225 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3226 	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3227 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3228 	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3229 	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3230 	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3231 	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3232 	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3233 		return 0;
3234 
3235 	/* mask fan attributes if we have no bindings for this asic to expose */
3236 	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3237 	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3238 	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3239 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3240 		effective_mode &= ~S_IRUGO;
3241 
	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3246 		effective_mode &= ~S_IWUSR;
3247 
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) &&
	      (adev->asic_type != CHIP_VANGOGH))) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3255 		return 0;
3256 
3257 	if (((adev->family == AMDGPU_FAMILY_SI) ||
3258 	     ((adev->flags & AMD_IS_APU) &&
3259 	      (adev->asic_type < CHIP_RENOIR))) &&	/* not implemented yet */
3260 	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3261 		return 0;
3262 
3263 	/* hide max/min values if we can't both query and manage the fan */
3264 	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3265 	      (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3266 	      (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3267 	      (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3268 	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3269 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3270 		return 0;
3271 
	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	    (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3276 		return 0;
3277 
3278 	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
3279 	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
3280 	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3281 	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3282 		return 0;
3283 
3284 	/* only APUs have vddnb */
3285 	if (!(adev->flags & AMD_IS_APU) &&
3286 	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3287 	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3288 		return 0;
3289 
3290 	/* no mclk on APUs */
3291 	if ((adev->flags & AMD_IS_APU) &&
3292 	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3293 	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3294 		return 0;
3295 
3296 	/* only SOC15 dGPUs support hotspot and mem temperatures */
3297 	if (((adev->flags & AMD_IS_APU) ||
3298 	     adev->asic_type < CHIP_VEGA10) &&
3299 	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3300 	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3301 	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3302 	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3303 	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3304 	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3305 	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3306 	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3307 	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3308 	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3309 	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3310 		return 0;
3311 
3312 	/* only Vangogh has fast PPT limit and power labels */
	if ((adev->asic_type != CHIP_VANGOGH) &&
	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3320 		return 0;
3321 
3322 	return effective_mode;
3323 }
3324 
3325 static const struct attribute_group hwmon_attrgroup = {
3326 	.attrs = hwmon_attributes,
3327 	.is_visible = hwmon_attributes_visible,
3328 };
3329 
3330 static const struct attribute_group *hwmon_groups[] = {
3331 	&hwmon_attrgroup,
3332 	NULL
3333 };
3334 
3335 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3336 {
3337 	int ret;
3338 	uint32_t mask = 0;
3339 
3340 	if (adev->pm.sysfs_initialized)
3341 		return 0;
3342 
3343 	if (adev->pm.dpm_enabled == 0)
3344 		return 0;
3345 
3346 	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3347 
3348 	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3349 								   DRIVER_NAME, adev,
3350 								   hwmon_groups);
3351 	if (IS_ERR(adev->pm.int_hwmon_dev)) {
3352 		ret = PTR_ERR(adev->pm.int_hwmon_dev);
3353 		dev_err(adev->dev,
3354 			"Unable to register hwmon device: %d\n", ret);
3355 		return ret;
3356 	}
3357 
3358 	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3359 	case SRIOV_VF_MODE_ONE_VF:
3360 		mask = ATTR_FLAG_ONEVF;
3361 		break;
3362 	case SRIOV_VF_MODE_MULTI_VF:
3363 		mask = 0;
3364 		break;
3365 	case SRIOV_VF_MODE_BARE_METAL:
3366 	default:
3367 		mask = ATTR_FLAG_MASK_ALL;
3368 		break;
3369 	}
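
	/*
	 * The mask chosen above is ANDed against each attribute's
	 * ATTR_FLAG_* bits in default_attr_update(), so in multi-VF
	 * mode (mask == 0) every basic attribute is left uncreated.
	 */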
3370 
3371 	ret = amdgpu_device_attr_create_groups(adev,
3372 					       amdgpu_device_attrs,
3373 					       ARRAY_SIZE(amdgpu_device_attrs),
3374 					       mask,
3375 					       &adev->pm.pm_attr_list);
3376 	if (ret)
3377 		return ret;
3378 
3379 	adev->pm.sysfs_initialized = true;
3380 
3381 	return 0;
3382 }
3383 
3384 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3385 {
3386 	if (adev->pm.dpm_enabled == 0)
3387 		return;
3388 
3389 	if (adev->pm.int_hwmon_dev)
3390 		hwmon_device_unregister(adev->pm.int_hwmon_dev);
3391 
3392 	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3393 }
3394 
3395 /*
3396  * Debugfs info
3397  */
3398 #if defined(CONFIG_DEBUG_FS)
3399 
static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
3402 	uint16_t *p_val;
3403 	uint32_t size;
3404 	int i;
3405 	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
3406 
3407 	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

		/* the in/out size tells the sensor read how large the buffer is */
		size = num_cpu_cores * sizeof(uint16_t);
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
3413 			for (i = 0; i < num_cpu_cores; i++)
3414 				seq_printf(m, "\t%u MHz (CPU%d)\n",
3415 					   *(p_val + i), i);
3416 		}
3417 
3418 		kfree(p_val);
3419 	}
3420 }

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64 = 0;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");

	amdgpu_debugfs_prints_cpu_info(m, adev);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask (64-bit value; reset the in/out size first) */
	size = sizeof(value64);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}
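
/*
 * Example of the resulting debugfs text for a post-Vega20 part, pieced
 * together from the format strings above (the values are made up):
 *
 *	GFX Clocks and Power:
 *		960 MHz (MCLK)
 *		1780 MHz (SCLK)
 *		500 MHz (PSTATE_SCLK)
 *		100 MHz (PSTATE_MCLK)
 *		1025 mV (VDDGFX)
 *		43.12 W (average GPU)
 *
 *	GPU Temperature: 56 C
 *	GPU Load: 23 %
 *	MEM Load: 7 %
 *
 *	SMC Feature Mask: 0x00000000f17fe3ff
 *	VCN: Disabled
 */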

static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	u32 flags = 0;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* resume_and_get returns 0 on success and drops its reference on
	 * failure, so no put is needed on the error path */
	r = pm_runtime_resume_and_get(dev->dev);
	if (r < 0)
		return r;

	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
		if (r)
			goto out;
	}

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
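
/*
 * DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info) expands to roughly the
 * following single_open() boilerplate (paraphrased from
 * include/linux/seq_file.h):
 */
#if 0	/* illustrative only */
static int amdgpu_debugfs_pm_info_open(struct inode *inode,
				       struct file *file)
{
	return single_open(file, amdgpu_debugfs_pm_info_show,
			   inode->i_private);
}

static const struct file_operations amdgpu_debugfs_pm_info_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_debugfs_pm_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif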

/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
 *
 * Reads the debug memory region allocated to PMFW
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	size_t smu_prv_buf_size;
	void *smu_prv_buf;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
	if (ret)
		return ret;

	if (!smu_prv_buf || !smu_prv_buf_size)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
				       smu_prv_buf_size);
}
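
/*
 * simple_read_from_buffer() clamps the request to the bytes remaining
 * past *pos, copies them to userspace and advances *pos, so ordinary
 * tools can dump the node, e.g. (path assumes DRM minor 0):
 *
 *	hexdump -C /sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer
 */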

static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};

#endif

void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
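
/*
 * Unlike plain debugfs_create_file(), debugfs_create_file_size() also
 * records the size on the new inode, so userspace can stat()
 * amdgpu_pm_prv_buffer to learn how much PMFW debug memory is
 * available before reading it.
 */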