1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #define SWSMU_CODE_LAYER_L1
24 
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_smu.h"
30 #include "smu_internal.h"
31 #include "atom.h"
32 #include "arcturus_ppt.h"
33 #include "navi10_ppt.h"
34 #include "sienna_cichlid_ppt.h"
35 #include "renoir_ppt.h"
36 #include "amd_pcie.h"
37 
38 /*
39  * DO NOT use these for err/warn/info/debug messages.
40  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
41  * They are more MGPU friendly.
42  */
43 #undef pr_err
44 #undef pr_warn
45 #undef pr_info
46 #undef pr_debug
47 
48 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
49 {
50 	size_t size = 0;
51 
52 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
53 		return -EOPNOTSUPP;
54 
55 	mutex_lock(&smu->mutex);
56 
57 	size = smu_get_pp_feature_mask(smu, buf);
58 
59 	mutex_unlock(&smu->mutex);
60 
61 	return size;
62 }
63 
64 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
65 {
66 	int ret = 0;
67 
68 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
69 		return -EOPNOTSUPP;
70 
71 	mutex_lock(&smu->mutex);
72 
73 	ret = smu_set_pp_feature_mask(smu, new_mask);
74 
75 	mutex_unlock(&smu->mutex);
76 
77 	return ret;
78 }
79 
80 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
81 {
82 	int ret = 0;
83 	struct smu_context *smu = &adev->smu;
84 
85 	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
86 		*value = smu_get_gfx_off_status(smu);
87 	else
88 		ret = -EINVAL;
89 
90 	return ret;
91 }
92 
93 int smu_set_soft_freq_range(struct smu_context *smu,
94 			    enum smu_clk_type clk_type,
95 			    uint32_t min,
96 			    uint32_t max)
97 {
98 	int ret = 0;
99 
100 	mutex_lock(&smu->mutex);
101 
102 	if (smu->ppt_funcs->set_soft_freq_limited_range)
103 		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
104 								  clk_type,
105 								  min,
106 								  max);
107 
108 	mutex_unlock(&smu->mutex);
109 
110 	return ret;
111 }
112 
113 int smu_get_dpm_freq_range(struct smu_context *smu,
114 			   enum smu_clk_type clk_type,
115 			   uint32_t *min,
116 			   uint32_t *max)
117 {
118 	int ret = 0;
119 
120 	if (!min && !max)
121 		return -EINVAL;
122 
123 	mutex_lock(&smu->mutex);
124 
125 	if (smu->ppt_funcs->get_dpm_ultimate_freq)
126 		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
127 							    clk_type,
128 							    min,
129 							    max);
130 
131 	mutex_unlock(&smu->mutex);
132 
133 	return ret;
134 }
135 
136 static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
137 					 bool enable)
138 {
139 	struct smu_power_context *smu_power = &smu->smu_power;
140 	struct smu_power_gate *power_gate = &smu_power->power_gate;
141 	int ret = 0;
142 
143 	if (!smu->ppt_funcs->dpm_set_vcn_enable)
144 		return 0;
145 
146 	if (atomic_read(&power_gate->vcn_gated) ^ enable)
147 		return 0;
148 
149 	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
150 	if (!ret)
151 		atomic_set(&power_gate->vcn_gated, !enable);
152 
153 	return ret;
154 }
155 
156 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
157 				  bool enable)
158 {
159 	struct smu_power_context *smu_power = &smu->smu_power;
160 	struct smu_power_gate *power_gate = &smu_power->power_gate;
161 	int ret = 0;
162 
163 	mutex_lock(&power_gate->vcn_gate_lock);
164 
165 	ret = smu_dpm_set_vcn_enable_locked(smu, enable);
166 
167 	mutex_unlock(&power_gate->vcn_gate_lock);
168 
169 	return ret;
170 }
171 
172 static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
173 					  bool enable)
174 {
175 	struct smu_power_context *smu_power = &smu->smu_power;
176 	struct smu_power_gate *power_gate = &smu_power->power_gate;
177 	int ret = 0;
178 
179 	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
180 		return 0;
181 
182 	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
183 		return 0;
184 
185 	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
186 	if (!ret)
187 		atomic_set(&power_gate->jpeg_gated, !enable);
188 
189 	return ret;
190 }
191 
192 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
193 				   bool enable)
194 {
195 	struct smu_power_context *smu_power = &smu->smu_power;
196 	struct smu_power_gate *power_gate = &smu_power->power_gate;
197 	int ret = 0;
198 
199 	mutex_lock(&power_gate->jpeg_gate_lock);
200 
201 	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
202 
203 	mutex_unlock(&power_gate->jpeg_gate_lock);
204 
205 	return ret;
206 }
207 
208 /**
209  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
210  *
211  * @smu:        smu_context pointer
212  * @block_type: the IP block to power gate/ungate
213  * @gate:       to power gate if true, ungate otherwise
214  *
215  * This API uses no smu->mutex lock protection due to:
216  * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce).
217  *    This is guarded to be race condition free by the caller.
218  * 2. Or get called on user setting request of power_dpm_force_performance_level.
219  *    Under this case, the smu->mutex lock protection is already enforced on
220  *    the parent API smu_force_performance_level of the call path.
221  */
222 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
223 			   bool gate)
224 {
225 	int ret = 0;
226 
227 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
228 		return -EOPNOTSUPP;
229 
230 	switch (block_type) {
231 	/*
232 	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
233 	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
234 	 */
235 	case AMD_IP_BLOCK_TYPE_UVD:
236 	case AMD_IP_BLOCK_TYPE_VCN:
237 		ret = smu_dpm_set_vcn_enable(smu, !gate);
238 		if (ret)
239 			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
240 				gate ? "gate" : "ungate");
241 		break;
242 	case AMD_IP_BLOCK_TYPE_GFX:
243 		ret = smu_gfx_off_control(smu, gate);
244 		if (ret)
245 			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
246 				gate ? "enable" : "disable");
247 		break;
248 	case AMD_IP_BLOCK_TYPE_SDMA:
249 		ret = smu_powergate_sdma(smu, gate);
250 		if (ret)
251 			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
252 				gate ? "gate" : "ungate");
253 		break;
254 	case AMD_IP_BLOCK_TYPE_JPEG:
255 		ret = smu_dpm_set_jpeg_enable(smu, !gate);
256 		if (ret)
257 			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
258 				gate ? "gate" : "ungate");
259 		break;
260 	default:
261 		dev_err(smu->adev->dev, "Unsupported block type!\n");
262 		return -EINVAL;
263 	}
264 
265 	return ret;
266 }
267 
268 int smu_get_power_num_states(struct smu_context *smu,
269 			     struct pp_states_info *state_info)
270 {
271 	if (!state_info)
272 		return -EINVAL;
273 
274 	/* not support power state */
275 	memset(state_info, 0, sizeof(struct pp_states_info));
276 	state_info->nums = 1;
277 	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
278 
279 	return 0;
280 }
281 
282 bool is_support_sw_smu(struct amdgpu_device *adev)
283 {
284 	if (adev->asic_type >= CHIP_ARCTURUS)
285 		return true;
286 
287 	return false;
288 }
289 
290 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
291 {
292 	struct smu_table_context *smu_table = &smu->smu_table;
293 	uint32_t powerplay_table_size;
294 
295 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
296 		return -EOPNOTSUPP;
297 
298 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
299 		return -EINVAL;
300 
301 	mutex_lock(&smu->mutex);
302 
303 	if (smu_table->hardcode_pptable)
304 		*table = smu_table->hardcode_pptable;
305 	else
306 		*table = smu_table->power_play_table;
307 
308 	powerplay_table_size = smu_table->power_play_table_size;
309 
310 	mutex_unlock(&smu->mutex);
311 
312 	return powerplay_table_size;
313 }
314 
315 int smu_sys_set_pp_table(struct smu_context *smu,  void *buf, size_t size)
316 {
317 	struct smu_table_context *smu_table = &smu->smu_table;
318 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
319 	int ret = 0;
320 
321 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
322 		return -EOPNOTSUPP;
323 
324 	if (header->usStructureSize != size) {
325 		dev_err(smu->adev->dev, "pp table size not matched !\n");
326 		return -EIO;
327 	}
328 
329 	mutex_lock(&smu->mutex);
330 	if (!smu_table->hardcode_pptable)
331 		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
332 	if (!smu_table->hardcode_pptable) {
333 		ret = -ENOMEM;
334 		goto failed;
335 	}
336 
337 	memcpy(smu_table->hardcode_pptable, buf, size);
338 	smu_table->power_play_table = smu_table->hardcode_pptable;
339 	smu_table->power_play_table_size = size;
340 
341 	/*
342 	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
343 	 * skipped) may be needed for custom pptable uploading.
344 	 */
345 	smu->uploading_custom_pp_table = true;
346 
347 	ret = smu_reset(smu);
348 	if (ret)
349 		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
350 
351 	smu->uploading_custom_pp_table = false;
352 
353 failed:
354 	mutex_unlock(&smu->mutex);
355 	return ret;
356 }
357 
358 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
359 {
360 	struct smu_feature *feature = &smu->smu_feature;
361 	int ret = 0;
362 	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
363 
364 	mutex_lock(&feature->mutex);
365 	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
366 	mutex_unlock(&feature->mutex);
367 
368 	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
369 					     SMU_FEATURE_MAX/32);
370 	if (ret)
371 		return ret;
372 
373 	mutex_lock(&feature->mutex);
374 	bitmap_or(feature->allowed, feature->allowed,
375 		      (unsigned long *)allowed_feature_mask,
376 		      feature->feature_num);
377 	mutex_unlock(&feature->mutex);
378 
379 	return ret;
380 }
381 
382 static int smu_set_funcs(struct amdgpu_device *adev)
383 {
384 	struct smu_context *smu = &adev->smu;
385 
386 	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
387 		smu->od_enabled = true;
388 
389 	switch (adev->asic_type) {
390 	case CHIP_NAVI10:
391 	case CHIP_NAVI14:
392 	case CHIP_NAVI12:
393 		navi10_set_ppt_funcs(smu);
394 		break;
395 	case CHIP_ARCTURUS:
396 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
397 		arcturus_set_ppt_funcs(smu);
398 		/* OD is not supported on Arcturus */
399 		smu->od_enabled =false;
400 		break;
401 	case CHIP_SIENNA_CICHLID:
402 	case CHIP_NAVY_FLOUNDER:
403 		sienna_cichlid_set_ppt_funcs(smu);
404 		break;
405 	case CHIP_RENOIR:
406 		renoir_set_ppt_funcs(smu);
407 		break;
408 	default:
409 		return -EINVAL;
410 	}
411 
412 	return 0;
413 }
414 
415 static int smu_early_init(void *handle)
416 {
417 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
418 	struct smu_context *smu = &adev->smu;
419 
420 	smu->adev = adev;
421 	smu->pm_enabled = !!amdgpu_dpm;
422 	smu->is_apu = false;
423 	mutex_init(&smu->mutex);
424 
425 	return smu_set_funcs(adev);
426 }
427 
428 static int smu_set_default_dpm_table(struct smu_context *smu)
429 {
430 	struct smu_power_context *smu_power = &smu->smu_power;
431 	struct smu_power_gate *power_gate = &smu_power->power_gate;
432 	int vcn_gate, jpeg_gate;
433 	int ret = 0;
434 
435 	if (!smu->ppt_funcs->set_default_dpm_table)
436 		return 0;
437 
438 	mutex_lock(&power_gate->vcn_gate_lock);
439 	mutex_lock(&power_gate->jpeg_gate_lock);
440 
441 	vcn_gate = atomic_read(&power_gate->vcn_gated);
442 	jpeg_gate = atomic_read(&power_gate->jpeg_gated);
443 
444 	ret = smu_dpm_set_vcn_enable_locked(smu, true);
445 	if (ret)
446 		goto err0_out;
447 
448 	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
449 	if (ret)
450 		goto err1_out;
451 
452 	ret = smu->ppt_funcs->set_default_dpm_table(smu);
453 	if (ret)
454 		dev_err(smu->adev->dev,
455 			"Failed to setup default dpm clock tables!\n");
456 
457 	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
458 err1_out:
459 	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
460 err0_out:
461 	mutex_unlock(&power_gate->jpeg_gate_lock);
462 	mutex_unlock(&power_gate->vcn_gate_lock);
463 
464 	return ret;
465 }
466 
467 static int smu_late_init(void *handle)
468 {
469 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
470 	struct smu_context *smu = &adev->smu;
471 	int ret = 0;
472 
473 	if (!smu->pm_enabled)
474 		return 0;
475 
476 	ret = smu_set_default_od_settings(smu);
477 	if (ret) {
478 		dev_err(adev->dev, "Failed to setup default OD settings!\n");
479 		return ret;
480 	}
481 
482 	/*
483 	 * Set initialized values (get from vbios) to dpm tables context such as
484 	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
485 	 * type of clks.
486 	 */
487 	ret = smu_set_default_dpm_table(smu);
488 	if (ret) {
489 		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
490 		return ret;
491 	}
492 
493 	ret = smu_populate_umd_state_clk(smu);
494 	if (ret) {
495 		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
496 		return ret;
497 	}
498 
499 	ret = smu_get_asic_power_limits(smu);
500 	if (ret) {
501 		dev_err(adev->dev, "Failed to get asic power limits!\n");
502 		return ret;
503 	}
504 
505 	smu_get_unique_id(smu);
506 
507 	smu_get_fan_parameters(smu);
508 
509 	smu_handle_task(&adev->smu,
510 			smu->smu_dpm.dpm_level,
511 			AMD_PP_TASK_COMPLETE_INIT,
512 			false);
513 
514 	return 0;
515 }
516 
517 static int smu_init_fb_allocations(struct smu_context *smu)
518 {
519 	struct amdgpu_device *adev = smu->adev;
520 	struct smu_table_context *smu_table = &smu->smu_table;
521 	struct smu_table *tables = smu_table->tables;
522 	struct smu_table *driver_table = &(smu_table->driver_table);
523 	uint32_t max_table_size = 0;
524 	int ret, i;
525 
526 	/* VRAM allocation for tool table */
527 	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
528 		ret = amdgpu_bo_create_kernel(adev,
529 					      tables[SMU_TABLE_PMSTATUSLOG].size,
530 					      tables[SMU_TABLE_PMSTATUSLOG].align,
531 					      tables[SMU_TABLE_PMSTATUSLOG].domain,
532 					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
533 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
534 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
535 		if (ret) {
536 			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
537 			return ret;
538 		}
539 	}
540 
541 	/* VRAM allocation for driver table */
542 	for (i = 0; i < SMU_TABLE_COUNT; i++) {
543 		if (tables[i].size == 0)
544 			continue;
545 
546 		if (i == SMU_TABLE_PMSTATUSLOG)
547 			continue;
548 
549 		if (max_table_size < tables[i].size)
550 			max_table_size = tables[i].size;
551 	}
552 
553 	driver_table->size = max_table_size;
554 	driver_table->align = PAGE_SIZE;
555 	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
556 
557 	ret = amdgpu_bo_create_kernel(adev,
558 				      driver_table->size,
559 				      driver_table->align,
560 				      driver_table->domain,
561 				      &driver_table->bo,
562 				      &driver_table->mc_address,
563 				      &driver_table->cpu_addr);
564 	if (ret) {
565 		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
566 		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
567 			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
568 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
569 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
570 	}
571 
572 	return ret;
573 }
574 
575 static int smu_fini_fb_allocations(struct smu_context *smu)
576 {
577 	struct smu_table_context *smu_table = &smu->smu_table;
578 	struct smu_table *tables = smu_table->tables;
579 	struct smu_table *driver_table = &(smu_table->driver_table);
580 
581 	if (!tables)
582 		return 0;
583 
584 	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
585 		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
586 				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
587 				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
588 
589 	amdgpu_bo_free_kernel(&driver_table->bo,
590 			      &driver_table->mc_address,
591 			      &driver_table->cpu_addr);
592 
593 	return 0;
594 }
595 
596 /**
597  * smu_alloc_memory_pool - allocate memory pool in the system memory
598  *
599  * @smu: amdgpu_device pointer
600  *
601  * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
602  * and DramLogSetDramAddr can notify it changed.
603  *
604  * Returns 0 on success, error on failure.
605  */
606 static int smu_alloc_memory_pool(struct smu_context *smu)
607 {
608 	struct amdgpu_device *adev = smu->adev;
609 	struct smu_table_context *smu_table = &smu->smu_table;
610 	struct smu_table *memory_pool = &smu_table->memory_pool;
611 	uint64_t pool_size = smu->pool_size;
612 	int ret = 0;
613 
614 	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
615 		return ret;
616 
617 	memory_pool->size = pool_size;
618 	memory_pool->align = PAGE_SIZE;
619 	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
620 
621 	switch (pool_size) {
622 	case SMU_MEMORY_POOL_SIZE_256_MB:
623 	case SMU_MEMORY_POOL_SIZE_512_MB:
624 	case SMU_MEMORY_POOL_SIZE_1_GB:
625 	case SMU_MEMORY_POOL_SIZE_2_GB:
626 		ret = amdgpu_bo_create_kernel(adev,
627 					      memory_pool->size,
628 					      memory_pool->align,
629 					      memory_pool->domain,
630 					      &memory_pool->bo,
631 					      &memory_pool->mc_address,
632 					      &memory_pool->cpu_addr);
633 		if (ret)
634 			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
635 		break;
636 	default:
637 		break;
638 	}
639 
640 	return ret;
641 }
642 
643 static int smu_free_memory_pool(struct smu_context *smu)
644 {
645 	struct smu_table_context *smu_table = &smu->smu_table;
646 	struct smu_table *memory_pool = &smu_table->memory_pool;
647 
648 	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
649 		return 0;
650 
651 	amdgpu_bo_free_kernel(&memory_pool->bo,
652 			      &memory_pool->mc_address,
653 			      &memory_pool->cpu_addr);
654 
655 	memset(memory_pool, 0, sizeof(struct smu_table));
656 
657 	return 0;
658 }
659 
660 static int smu_smc_table_sw_init(struct smu_context *smu)
661 {
662 	int ret;
663 
664 	/**
665 	 * Create smu_table structure, and init smc tables such as
666 	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
667 	 */
668 	ret = smu_init_smc_tables(smu);
669 	if (ret) {
670 		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
671 		return ret;
672 	}
673 
674 	/**
675 	 * Create smu_power_context structure, and allocate smu_dpm_context and
676 	 * context size to fill the smu_power_context data.
677 	 */
678 	ret = smu_init_power(smu);
679 	if (ret) {
680 		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
681 		return ret;
682 	}
683 
684 	/*
685 	 * allocate vram bos to store smc table contents.
686 	 */
687 	ret = smu_init_fb_allocations(smu);
688 	if (ret)
689 		return ret;
690 
691 	ret = smu_alloc_memory_pool(smu);
692 	if (ret)
693 		return ret;
694 
695 	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
696 	if (ret)
697 		return ret;
698 
699 	return 0;
700 }
701 
702 static int smu_smc_table_sw_fini(struct smu_context *smu)
703 {
704 	int ret;
705 
706 	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
707 
708 	ret = smu_free_memory_pool(smu);
709 	if (ret)
710 		return ret;
711 
712 	ret = smu_fini_fb_allocations(smu);
713 	if (ret)
714 		return ret;
715 
716 	ret = smu_fini_power(smu);
717 	if (ret) {
718 		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
719 		return ret;
720 	}
721 
722 	ret = smu_fini_smc_tables(smu);
723 	if (ret) {
724 		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
725 		return ret;
726 	}
727 
728 	return 0;
729 }
730 
731 static void smu_throttling_logging_work_fn(struct work_struct *work)
732 {
733 	struct smu_context *smu = container_of(work, struct smu_context,
734 					       throttling_logging_work);
735 
736 	smu_log_thermal_throttling(smu);
737 }
738 
739 static int smu_sw_init(void *handle)
740 {
741 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
742 	struct smu_context *smu = &adev->smu;
743 	int ret;
744 
745 	smu->pool_size = adev->pm.smu_prv_buffer_size;
746 	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
747 	mutex_init(&smu->smu_feature.mutex);
748 	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
749 	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
750 	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
751 
752 	mutex_init(&smu->smu_baco.mutex);
753 	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
754 	smu->smu_baco.platform_support = false;
755 
756 	mutex_init(&smu->sensor_lock);
757 	mutex_init(&smu->metrics_lock);
758 	mutex_init(&smu->message_lock);
759 
760 	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
761 	atomic64_set(&smu->throttle_int_counter, 0);
762 	smu->watermarks_bitmap = 0;
763 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
764 	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
765 
766 	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
767 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
768 	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
769 	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
770 
771 	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
772 	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
773 	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
774 	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
775 	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
776 	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
777 	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
778 	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
779 
780 	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
781 	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
782 	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
783 	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
784 	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
785 	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
786 	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
787 	smu->display_config = &adev->pm.pm_display_cfg;
788 
789 	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
790 	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
791 	ret = smu_init_microcode(smu);
792 	if (ret) {
793 		dev_err(adev->dev, "Failed to load smu firmware!\n");
794 		return ret;
795 	}
796 
797 	ret = smu_smc_table_sw_init(smu);
798 	if (ret) {
799 		dev_err(adev->dev, "Failed to sw init smc table!\n");
800 		return ret;
801 	}
802 
803 	ret = smu_register_irq_handler(smu);
804 	if (ret) {
805 		dev_err(adev->dev, "Failed to register smc irq handler!\n");
806 		return ret;
807 	}
808 
809 	return 0;
810 }
811 
812 static int smu_sw_fini(void *handle)
813 {
814 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
815 	struct smu_context *smu = &adev->smu;
816 	int ret;
817 
818 	ret = smu_smc_table_sw_fini(smu);
819 	if (ret) {
820 		dev_err(adev->dev, "Failed to sw fini smc table!\n");
821 		return ret;
822 	}
823 
824 	smu_fini_microcode(smu);
825 
826 	return 0;
827 }
828 
829 static int smu_get_thermal_temperature_range(struct smu_context *smu)
830 {
831 	struct amdgpu_device *adev = smu->adev;
832 	struct smu_temperature_range *range =
833 				&smu->thermal_range;
834 	int ret = 0;
835 
836 	if (!smu->ppt_funcs->get_thermal_temperature_range)
837 		return 0;
838 
839 	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
840 	if (ret)
841 		return ret;
842 
843 	adev->pm.dpm.thermal.min_temp = range->min;
844 	adev->pm.dpm.thermal.max_temp = range->max;
845 	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
846 	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
847 	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
848 	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
849 	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
850 	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
851 	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
852 
853 	return ret;
854 }
855 
856 static int smu_smc_hw_setup(struct smu_context *smu)
857 {
858 	struct amdgpu_device *adev = smu->adev;
859 	uint32_t pcie_gen = 0, pcie_width = 0;
860 	int ret;
861 
862 	if (adev->in_suspend && smu_is_dpm_running(smu)) {
863 		dev_info(adev->dev, "dpm has been enabled\n");
864 		return 0;
865 	}
866 
867 	ret = smu_init_display_count(smu, 0);
868 	if (ret) {
869 		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
870 		return ret;
871 	}
872 
873 	ret = smu_set_driver_table_location(smu);
874 	if (ret) {
875 		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
876 		return ret;
877 	}
878 
879 	/*
880 	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
881 	 */
882 	ret = smu_set_tool_table_location(smu);
883 	if (ret) {
884 		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
885 		return ret;
886 	}
887 
888 	/*
889 	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
890 	 * pool location.
891 	 */
892 	ret = smu_notify_memory_pool_location(smu);
893 	if (ret) {
894 		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
895 		return ret;
896 	}
897 
898 	/* smu_dump_pptable(smu); */
899 	/*
900 	 * Copy pptable bo in the vram to smc with SMU MSGs such as
901 	 * SetDriverDramAddr and TransferTableDram2Smu.
902 	 */
903 	ret = smu_write_pptable(smu);
904 	if (ret) {
905 		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
906 		return ret;
907 	}
908 
909 	/* issue Run*Btc msg */
910 	ret = smu_run_btc(smu);
911 	if (ret)
912 		return ret;
913 
914 	ret = smu_feature_set_allowed_mask(smu);
915 	if (ret) {
916 		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
917 		return ret;
918 	}
919 
920 	ret = smu_system_features_control(smu, true);
921 	if (ret) {
922 		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
923 		return ret;
924 	}
925 
926 	if (!smu_is_dpm_running(smu))
927 		dev_info(adev->dev, "dpm has been disabled\n");
928 
929 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
930 		pcie_gen = 3;
931 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
932 		pcie_gen = 2;
933 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
934 		pcie_gen = 1;
935 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
936 		pcie_gen = 0;
937 
938 	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
939 	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
940 	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
941 	 */
942 	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
943 		pcie_width = 6;
944 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
945 		pcie_width = 5;
946 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
947 		pcie_width = 4;
948 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
949 		pcie_width = 3;
950 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
951 		pcie_width = 2;
952 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
953 		pcie_width = 1;
954 	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
955 	if (ret) {
956 		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
957 		return ret;
958 	}
959 
960 	ret = smu_get_thermal_temperature_range(smu);
961 	if (ret) {
962 		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
963 		return ret;
964 	}
965 
966 	ret = smu_enable_thermal_alert(smu);
967 	if (ret) {
968 		dev_err(adev->dev, "Failed to enable thermal alert!\n");
969 		return ret;
970 	}
971 
972 	ret = smu_disable_umc_cdr_12gbps_workaround(smu);
973 	if (ret) {
974 		dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
975 		return ret;
976 	}
977 
978 	/*
979 	 * For Navi1X, manually switch it to AC mode as PMFW
980 	 * may boot it with DC mode.
981 	 */
982 	ret = smu_set_power_source(smu,
983 				   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
984 				   SMU_POWER_SOURCE_DC);
985 	if (ret) {
986 		dev_err(adev->dev, "Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
987 		return ret;
988 	}
989 
990 	ret = smu_notify_display_change(smu);
991 	if (ret)
992 		return ret;
993 
994 	/*
995 	 * Set min deep sleep dce fclk with bootup value from vbios via
996 	 * SetMinDeepSleepDcefclk MSG.
997 	 */
998 	ret = smu_set_min_dcef_deep_sleep(smu,
999 					  smu->smu_table.boot_values.dcefclk / 100);
1000 	if (ret)
1001 		return ret;
1002 
1003 	return ret;
1004 }
1005 
1006 static int smu_start_smc_engine(struct smu_context *smu)
1007 {
1008 	struct amdgpu_device *adev = smu->adev;
1009 	int ret = 0;
1010 
1011 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1012 		if (adev->asic_type < CHIP_NAVI10) {
1013 			if (smu->ppt_funcs->load_microcode) {
1014 				ret = smu->ppt_funcs->load_microcode(smu);
1015 				if (ret)
1016 					return ret;
1017 			}
1018 		}
1019 	}
1020 
1021 	if (smu->ppt_funcs->check_fw_status) {
1022 		ret = smu->ppt_funcs->check_fw_status(smu);
1023 		if (ret) {
1024 			dev_err(adev->dev, "SMC is not ready\n");
1025 			return ret;
1026 		}
1027 	}
1028 
1029 	/*
1030 	 * Send msg GetDriverIfVersion to check if the return value is equal
1031 	 * with DRIVER_IF_VERSION of smc header.
1032 	 */
1033 	ret = smu_check_fw_version(smu);
1034 	if (ret)
1035 		return ret;
1036 
1037 	return ret;
1038 }
1039 
1040 static int smu_hw_init(void *handle)
1041 {
1042 	int ret;
1043 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1044 	struct smu_context *smu = &adev->smu;
1045 
1046 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1047 		smu->pm_enabled = false;
1048 		return 0;
1049 	}
1050 
1051 	ret = smu_start_smc_engine(smu);
1052 	if (ret) {
1053 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
1054 		return ret;
1055 	}
1056 
1057 	if (smu->is_apu) {
1058 		smu_powergate_sdma(&adev->smu, false);
1059 		smu_dpm_set_vcn_enable(smu, true);
1060 		smu_dpm_set_jpeg_enable(smu, true);
1061 		smu_set_gfx_cgpg(&adev->smu, true);
1062 	}
1063 
1064 	if (!smu->pm_enabled)
1065 		return 0;
1066 
1067 	/* get boot_values from vbios to set revision, gfxclk, and etc. */
1068 	ret = smu_get_vbios_bootup_values(smu);
1069 	if (ret) {
1070 		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1071 		return ret;
1072 	}
1073 
1074 	ret = smu_setup_pptable(smu);
1075 	if (ret) {
1076 		dev_err(adev->dev, "Failed to setup pptable!\n");
1077 		return ret;
1078 	}
1079 
1080 	ret = smu_get_driver_allowed_feature_mask(smu);
1081 	if (ret)
1082 		return ret;
1083 
1084 	ret = smu_smc_hw_setup(smu);
1085 	if (ret) {
1086 		dev_err(adev->dev, "Failed to setup smc hw!\n");
1087 		return ret;
1088 	}
1089 
1090 	/*
1091 	 * Move maximum sustainable clock retrieving here considering
1092 	 * 1. It is not needed on resume(from S3).
1093 	 * 2. DAL settings come between .hw_init and .late_init of SMU.
1094 	 *    And DAL needs to know the maximum sustainable clocks. Thus
1095 	 *    it cannot be put in .late_init().
1096 	 */
1097 	ret = smu_init_max_sustainable_clocks(smu);
1098 	if (ret) {
1099 		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1100 		return ret;
1101 	}
1102 
1103 	adev->pm.dpm_enabled = true;
1104 
1105 	dev_info(adev->dev, "SMU is initialized successfully!\n");
1106 
1107 	return 0;
1108 }
1109 
1110 static int smu_disable_dpms(struct smu_context *smu)
1111 {
1112 	struct amdgpu_device *adev = smu->adev;
1113 	int ret = 0;
1114 	bool use_baco = !smu->is_apu &&
1115 		((amdgpu_in_reset(adev) &&
1116 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1117 		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
1118 
1119 	/*
1120 	 * For custom pptable uploading, skip the DPM features
1121 	 * disable process on Navi1x ASICs.
1122 	 *   - As the gfx related features are under control of
1123 	 *     RLC on those ASICs. RLC reinitialization will be
1124 	 *     needed to reenable them. That will cost much more
1125 	 *     efforts.
1126 	 *
1127 	 *   - SMU firmware can handle the DPM reenablement
1128 	 *     properly.
1129 	 */
1130 	if (smu->uploading_custom_pp_table &&
1131 	    (adev->asic_type >= CHIP_NAVI10) &&
1132 	    (adev->asic_type <= CHIP_NAVY_FLOUNDER))
1133 		return 0;
1134 
1135 	/*
1136 	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
1137 	 * on BACO in. Driver involvement is unnecessary.
1138 	 */
1139 	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
1140 	     use_baco)
1141 		return 0;
1142 
1143 	/*
1144 	 * For gpu reset, runpm and hibernation through BACO,
1145 	 * BACO feature has to be kept enabled.
1146 	 */
1147 	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1148 		ret = smu_disable_all_features_with_exception(smu,
1149 							      SMU_FEATURE_BACO_BIT);
1150 		if (ret)
1151 			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1152 	} else {
1153 		ret = smu_system_features_control(smu, false);
1154 		if (ret)
1155 			dev_err(adev->dev, "Failed to disable smu features.\n");
1156 	}
1157 
1158 	if (adev->asic_type >= CHIP_NAVI10 &&
1159 	    adev->gfx.rlc.funcs->stop)
1160 		adev->gfx.rlc.funcs->stop(adev);
1161 
1162 	return ret;
1163 }
1164 
1165 static int smu_smc_hw_cleanup(struct smu_context *smu)
1166 {
1167 	struct amdgpu_device *adev = smu->adev;
1168 	int ret = 0;
1169 
1170 	cancel_work_sync(&smu->throttling_logging_work);
1171 
1172 	ret = smu_disable_thermal_alert(smu);
1173 	if (ret) {
1174 		dev_err(adev->dev, "Fail to disable thermal alert!\n");
1175 		return ret;
1176 	}
1177 
1178 	ret = smu_disable_dpms(smu);
1179 	if (ret) {
1180 		dev_err(adev->dev, "Fail to disable dpm features!\n");
1181 		return ret;
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 static int smu_hw_fini(void *handle)
1188 {
1189 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1190 	struct smu_context *smu = &adev->smu;
1191 	int ret = 0;
1192 
1193 	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
1194 		return 0;
1195 
1196 	if (smu->is_apu) {
1197 		smu_powergate_sdma(&adev->smu, true);
1198 		smu_dpm_set_vcn_enable(smu, false);
1199 		smu_dpm_set_jpeg_enable(smu, false);
1200 	}
1201 
1202 	if (!smu->pm_enabled)
1203 		return 0;
1204 
1205 	adev->pm.dpm_enabled = false;
1206 
1207 	ret = smu_smc_hw_cleanup(smu);
1208 	if (ret)
1209 		return ret;
1210 
1211 	return 0;
1212 }
1213 
1214 int smu_reset(struct smu_context *smu)
1215 {
1216 	struct amdgpu_device *adev = smu->adev;
1217 	int ret;
1218 
1219 	amdgpu_gfx_off_ctrl(smu->adev, false);
1220 
1221 	ret = smu_hw_fini(adev);
1222 	if (ret)
1223 		return ret;
1224 
1225 	ret = smu_hw_init(adev);
1226 	if (ret)
1227 		return ret;
1228 
1229 	ret = smu_late_init(adev);
1230 	if (ret)
1231 		return ret;
1232 
1233 	amdgpu_gfx_off_ctrl(smu->adev, true);
1234 
1235 	return 0;
1236 }
1237 
1238 static int smu_suspend(void *handle)
1239 {
1240 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1241 	struct smu_context *smu = &adev->smu;
1242 	int ret;
1243 
1244 	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
1245 		return 0;
1246 
1247 	if (!smu->pm_enabled)
1248 		return 0;
1249 
1250 	adev->pm.dpm_enabled = false;
1251 
1252 	ret = smu_smc_hw_cleanup(smu);
1253 	if (ret)
1254 		return ret;
1255 
1256 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1257 
1258 	if (smu->is_apu)
1259 		smu_set_gfx_cgpg(&adev->smu, false);
1260 
1261 	return 0;
1262 }
1263 
1264 static int smu_resume(void *handle)
1265 {
1266 	int ret;
1267 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1268 	struct smu_context *smu = &adev->smu;
1269 
1270 	if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
1271 		return 0;
1272 
1273 	if (!smu->pm_enabled)
1274 		return 0;
1275 
1276 	dev_info(adev->dev, "SMU is resuming...\n");
1277 
1278 	ret = smu_start_smc_engine(smu);
1279 	if (ret) {
1280 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
1281 		return ret;
1282 	}
1283 
1284 	ret = smu_smc_hw_setup(smu);
1285 	if (ret) {
1286 		dev_err(adev->dev, "Failed to setup smc hw!\n");
1287 		return ret;
1288 	}
1289 
1290 	if (smu->is_apu)
1291 		smu_set_gfx_cgpg(&adev->smu, true);
1292 
1293 	smu->disable_uclk_switch = 0;
1294 
1295 	adev->pm.dpm_enabled = true;
1296 
1297 	dev_info(adev->dev, "SMU is resumed successfully!\n");
1298 
1299 	return 0;
1300 }
1301 
1302 int smu_display_configuration_change(struct smu_context *smu,
1303 				     const struct amd_pp_display_configuration *display_config)
1304 {
1305 	int index = 0;
1306 	int num_of_active_display = 0;
1307 
1308 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1309 		return -EOPNOTSUPP;
1310 
1311 	if (!display_config)
1312 		return -EINVAL;
1313 
1314 	mutex_lock(&smu->mutex);
1315 
1316 	smu_set_min_dcef_deep_sleep(smu,
1317 				    display_config->min_dcef_deep_sleep_set_clk / 100);
1318 
1319 	for (index = 0; index < display_config->num_path_including_non_display; index++) {
1320 		if (display_config->displays[index].controller_id != 0)
1321 			num_of_active_display++;
1322 	}
1323 
1324 	smu_set_active_display_count(smu, num_of_active_display);
1325 
1326 	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1327 			   display_config->cpu_cc6_disable,
1328 			   display_config->cpu_pstate_disable,
1329 			   display_config->nb_pstate_switch_disable);
1330 
1331 	mutex_unlock(&smu->mutex);
1332 
1333 	return 0;
1334 }
1335 
1336 static int smu_get_clock_info(struct smu_context *smu,
1337 			      struct smu_clock_info *clk_info,
1338 			      enum smu_perf_level_designation designation)
1339 {
1340 	int ret;
1341 	struct smu_performance_level level = {0};
1342 
1343 	if (!clk_info)
1344 		return -EINVAL;
1345 
1346 	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1347 	if (ret)
1348 		return -EINVAL;
1349 
1350 	clk_info->min_mem_clk = level.memory_clock;
1351 	clk_info->min_eng_clk = level.core_clock;
1352 	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1353 
1354 	ret = smu_get_perf_level(smu, designation, &level);
1355 	if (ret)
1356 		return -EINVAL;
1357 
1358 	clk_info->min_mem_clk = level.memory_clock;
1359 	clk_info->min_eng_clk = level.core_clock;
1360 	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1361 
1362 	return 0;
1363 }
1364 
1365 int smu_get_current_clocks(struct smu_context *smu,
1366 			   struct amd_pp_clock_info *clocks)
1367 {
1368 	struct amd_pp_simple_clock_info simple_clocks = {0};
1369 	struct smu_clock_info hw_clocks;
1370 	int ret = 0;
1371 
1372 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1373 		return -EOPNOTSUPP;
1374 
1375 	mutex_lock(&smu->mutex);
1376 
1377 	smu_get_dal_power_level(smu, &simple_clocks);
1378 
1379 	if (smu->support_power_containment)
1380 		ret = smu_get_clock_info(smu, &hw_clocks,
1381 					 PERF_LEVEL_POWER_CONTAINMENT);
1382 	else
1383 		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1384 
1385 	if (ret) {
1386 		dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
1387 		goto failed;
1388 	}
1389 
1390 	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1391 	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1392 	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1393 	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1394 	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1395 	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1396 	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1397 	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1398 
1399         if (simple_clocks.level == 0)
1400                 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1401         else
1402                 clocks->max_clocks_state = simple_clocks.level;
1403 
1404         if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1405                 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1406                 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1407         }
1408 
1409 failed:
1410 	mutex_unlock(&smu->mutex);
1411 	return ret;
1412 }
1413 
1414 static int smu_set_clockgating_state(void *handle,
1415 				     enum amd_clockgating_state state)
1416 {
1417 	return 0;
1418 }
1419 
1420 static int smu_set_powergating_state(void *handle,
1421 				     enum amd_powergating_state state)
1422 {
1423 	return 0;
1424 }
1425 
1426 static int smu_enable_umd_pstate(void *handle,
1427 		      enum amd_dpm_forced_level *level)
1428 {
1429 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1430 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1431 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1432 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1433 
1434 	struct smu_context *smu = (struct smu_context*)(handle);
1435 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1436 
1437 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1438 		return -EINVAL;
1439 
1440 	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1441 		/* enter umd pstate, save current level, disable gfx cg*/
1442 		if (*level & profile_mode_mask) {
1443 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1444 			smu_dpm_ctx->enable_umd_pstate = true;
1445 			amdgpu_device_ip_set_powergating_state(smu->adev,
1446 							       AMD_IP_BLOCK_TYPE_GFX,
1447 							       AMD_PG_STATE_UNGATE);
1448 			amdgpu_device_ip_set_clockgating_state(smu->adev,
1449 							       AMD_IP_BLOCK_TYPE_GFX,
1450 							       AMD_CG_STATE_UNGATE);
1451 			smu_gfx_ulv_control(smu, false);
1452 			smu_deep_sleep_control(smu, false);
1453 		}
1454 	} else {
1455 		/* exit umd pstate, restore level, enable gfx cg*/
1456 		if (!(*level & profile_mode_mask)) {
1457 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1458 				*level = smu_dpm_ctx->saved_dpm_level;
1459 			smu_dpm_ctx->enable_umd_pstate = false;
1460 			smu_deep_sleep_control(smu, true);
1461 			smu_gfx_ulv_control(smu, true);
1462 			amdgpu_device_ip_set_clockgating_state(smu->adev,
1463 							       AMD_IP_BLOCK_TYPE_GFX,
1464 							       AMD_CG_STATE_GATE);
1465 			amdgpu_device_ip_set_powergating_state(smu->adev,
1466 							       AMD_IP_BLOCK_TYPE_GFX,
1467 							       AMD_PG_STATE_GATE);
1468 		}
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1475 				   enum amd_dpm_forced_level level,
1476 				   bool skip_display_settings)
1477 {
1478 	int ret = 0;
1479 	int index = 0;
1480 	long workload;
1481 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1482 
1483 	if (!skip_display_settings) {
1484 		ret = smu_display_config_changed(smu);
1485 		if (ret) {
1486 			dev_err(smu->adev->dev, "Failed to change display config!");
1487 			return ret;
1488 		}
1489 	}
1490 
1491 	ret = smu_apply_clocks_adjust_rules(smu);
1492 	if (ret) {
1493 		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1494 		return ret;
1495 	}
1496 
1497 	if (!skip_display_settings) {
1498 		ret = smu_notify_smc_display_config(smu);
1499 		if (ret) {
1500 			dev_err(smu->adev->dev, "Failed to notify smc display config!");
1501 			return ret;
1502 		}
1503 	}
1504 
1505 	if (smu_dpm_ctx->dpm_level != level) {
1506 		ret = smu_asic_set_performance_level(smu, level);
1507 		if (ret) {
1508 			dev_err(smu->adev->dev, "Failed to set performance level!");
1509 			return ret;
1510 		}
1511 
1512 		/* update the saved copy */
1513 		smu_dpm_ctx->dpm_level = level;
1514 	}
1515 
1516 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1517 		index = fls(smu->workload_mask);
1518 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1519 		workload = smu->workload_setting[index];
1520 
1521 		if (smu->power_profile_mode != workload)
1522 			smu_set_power_profile_mode(smu, &workload, 0, false);
1523 	}
1524 
1525 	return ret;
1526 }
1527 
1528 int smu_handle_task(struct smu_context *smu,
1529 		    enum amd_dpm_forced_level level,
1530 		    enum amd_pp_task task_id,
1531 		    bool lock_needed)
1532 {
1533 	int ret = 0;
1534 
1535 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1536 		return -EOPNOTSUPP;
1537 
1538 	if (lock_needed)
1539 		mutex_lock(&smu->mutex);
1540 
1541 	switch (task_id) {
1542 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1543 		ret = smu_pre_display_config_changed(smu);
1544 		if (ret)
1545 			goto out;
1546 		ret = smu_set_cpu_power_state(smu);
1547 		if (ret)
1548 			goto out;
1549 		ret = smu_adjust_power_state_dynamic(smu, level, false);
1550 		break;
1551 	case AMD_PP_TASK_COMPLETE_INIT:
1552 	case AMD_PP_TASK_READJUST_POWER_STATE:
1553 		ret = smu_adjust_power_state_dynamic(smu, level, true);
1554 		break;
1555 	default:
1556 		break;
1557 	}
1558 
1559 out:
1560 	if (lock_needed)
1561 		mutex_unlock(&smu->mutex);
1562 
1563 	return ret;
1564 }
1565 
1566 int smu_switch_power_profile(struct smu_context *smu,
1567 			     enum PP_SMC_POWER_PROFILE type,
1568 			     bool en)
1569 {
1570 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1571 	long workload;
1572 	uint32_t index;
1573 
1574 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1575 		return -EOPNOTSUPP;
1576 
1577 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1578 		return -EINVAL;
1579 
1580 	mutex_lock(&smu->mutex);
1581 
1582 	if (!en) {
1583 		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1584 		index = fls(smu->workload_mask);
1585 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1586 		workload = smu->workload_setting[index];
1587 	} else {
1588 		smu->workload_mask |= (1 << smu->workload_prority[type]);
1589 		index = fls(smu->workload_mask);
1590 		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1591 		workload = smu->workload_setting[index];
1592 	}
1593 
1594 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1595 		smu_set_power_profile_mode(smu, &workload, 0, false);
1596 
1597 	mutex_unlock(&smu->mutex);
1598 
1599 	return 0;
1600 }
1601 
1602 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1603 {
1604 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1605 	enum amd_dpm_forced_level level;
1606 
1607 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1608 		return -EOPNOTSUPP;
1609 
1610 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1611 		return -EINVAL;
1612 
1613 	mutex_lock(&(smu->mutex));
1614 	level = smu_dpm_ctx->dpm_level;
1615 	mutex_unlock(&(smu->mutex));
1616 
1617 	return level;
1618 }
1619 
1620 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1621 {
1622 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1623 	int ret = 0;
1624 
1625 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1626 		return -EOPNOTSUPP;
1627 
1628 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1629 		return -EINVAL;
1630 
1631 	mutex_lock(&smu->mutex);
1632 
1633 	ret = smu_enable_umd_pstate(smu, &level);
1634 	if (ret) {
1635 		mutex_unlock(&smu->mutex);
1636 		return ret;
1637 	}
1638 
1639 	ret = smu_handle_task(smu, level,
1640 			      AMD_PP_TASK_READJUST_POWER_STATE,
1641 			      false);
1642 
1643 	mutex_unlock(&smu->mutex);
1644 
1645 	return ret;
1646 }
1647 
1648 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1649 {
1650 	int ret = 0;
1651 
1652 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1653 		return -EOPNOTSUPP;
1654 
1655 	mutex_lock(&smu->mutex);
1656 	ret = smu_init_display_count(smu, count);
1657 	mutex_unlock(&smu->mutex);
1658 
1659 	return ret;
1660 }
1661 
1662 int smu_force_clk_levels(struct smu_context *smu,
1663 			 enum smu_clk_type clk_type,
1664 			 uint32_t mask)
1665 {
1666 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1667 	int ret = 0;
1668 
1669 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1670 		return -EOPNOTSUPP;
1671 
1672 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1673 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
1674 		return -EINVAL;
1675 	}
1676 
1677 	mutex_lock(&smu->mutex);
1678 
1679 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1680 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1681 
1682 	mutex_unlock(&smu->mutex);
1683 
1684 	return ret;
1685 }
1686 
1687 /*
1688  * On system suspending or resetting, the dpm_enabled
1689  * flag will be cleared. So that those SMU services which
1690  * are not supported will be gated.
1691  * However, the mp1 state setting should still be granted
1692  * even if the dpm_enabled cleared.
1693  */
1694 int smu_set_mp1_state(struct smu_context *smu,
1695 		      enum pp_mp1_state mp1_state)
1696 {
1697 	uint16_t msg;
1698 	int ret;
1699 
1700 	if (!smu->pm_enabled)
1701 		return -EOPNOTSUPP;
1702 
1703 	mutex_lock(&smu->mutex);
1704 
1705 	switch (mp1_state) {
1706 	case PP_MP1_STATE_SHUTDOWN:
1707 		msg = SMU_MSG_PrepareMp1ForShutdown;
1708 		break;
1709 	case PP_MP1_STATE_UNLOAD:
1710 		msg = SMU_MSG_PrepareMp1ForUnload;
1711 		break;
1712 	case PP_MP1_STATE_RESET:
1713 		msg = SMU_MSG_PrepareMp1ForReset;
1714 		break;
1715 	case PP_MP1_STATE_NONE:
1716 	default:
1717 		mutex_unlock(&smu->mutex);
1718 		return 0;
1719 	}
1720 
1721 	ret = smu_send_smc_msg(smu, msg, NULL);
1722 	/* some asics may not support those messages */
1723 	if (ret == -EINVAL)
1724 		ret = 0;
1725 	if (ret)
1726 		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1727 
1728 	mutex_unlock(&smu->mutex);
1729 
1730 	return ret;
1731 }
1732 
1733 int smu_set_df_cstate(struct smu_context *smu,
1734 		      enum pp_df_cstate state)
1735 {
1736 	int ret = 0;
1737 
1738 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1739 		return -EOPNOTSUPP;
1740 
1741 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1742 		return 0;
1743 
1744 	mutex_lock(&smu->mutex);
1745 
1746 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
1747 	if (ret)
1748 		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
1749 
1750 	mutex_unlock(&smu->mutex);
1751 
1752 	return ret;
1753 }
1754 
1755 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
1756 {
1757 	int ret = 0;
1758 
1759 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1760 		return -EOPNOTSUPP;
1761 
1762 	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
1763 		return 0;
1764 
1765 	mutex_lock(&smu->mutex);
1766 
1767 	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
1768 	if (ret)
1769 		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
1770 
1771 	mutex_unlock(&smu->mutex);
1772 
1773 	return ret;
1774 }
1775 
1776 int smu_write_watermarks_table(struct smu_context *smu)
1777 {
1778 	int ret = 0;
1779 
1780 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1781 		return -EOPNOTSUPP;
1782 
1783 	mutex_lock(&smu->mutex);
1784 
1785 	ret = smu_set_watermarks_table(smu, NULL);
1786 
1787 	mutex_unlock(&smu->mutex);
1788 
1789 	return ret;
1790 }
1791 
1792 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
1793 		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
1794 {
1795 	int ret = 0;
1796 
1797 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1798 		return -EOPNOTSUPP;
1799 
1800 	mutex_lock(&smu->mutex);
1801 
1802 	if (!smu->disable_watermark &&
1803 			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1804 			smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1805 		ret = smu_set_watermarks_table(smu, clock_ranges);
1806 
1807 		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
1808 			smu->watermarks_bitmap |= WATERMARKS_EXIST;
1809 			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
1810 		}
1811 	}
1812 
1813 	mutex_unlock(&smu->mutex);
1814 
1815 	return ret;
1816 }
1817 
1818 int smu_set_ac_dc(struct smu_context *smu)
1819 {
1820 	int ret = 0;
1821 
1822 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1823 		return -EOPNOTSUPP;
1824 
1825 	/* controlled by firmware */
1826 	if (smu->dc_controlled_by_gpio)
1827 		return 0;
1828 
1829 	mutex_lock(&smu->mutex);
1830 	ret = smu_set_power_source(smu,
1831 				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
1832 				   SMU_POWER_SOURCE_DC);
1833 	if (ret)
1834 		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
1835 		       smu->adev->pm.ac_power ? "AC" : "DC");
1836 	mutex_unlock(&smu->mutex);
1837 
1838 	return ret;
1839 }
1840 
1841 const struct amd_ip_funcs smu_ip_funcs = {
1842 	.name = "smu",
1843 	.early_init = smu_early_init,
1844 	.late_init = smu_late_init,
1845 	.sw_init = smu_sw_init,
1846 	.sw_fini = smu_sw_fini,
1847 	.hw_init = smu_hw_init,
1848 	.hw_fini = smu_hw_fini,
1849 	.suspend = smu_suspend,
1850 	.resume = smu_resume,
1851 	.is_idle = NULL,
1852 	.check_soft_reset = NULL,
1853 	.wait_for_idle = NULL,
1854 	.soft_reset = NULL,
1855 	.set_clockgating_state = smu_set_clockgating_state,
1856 	.set_powergating_state = smu_set_powergating_state,
1857 	.enable_umd_pstate = smu_enable_umd_pstate,
1858 };
1859 
1860 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
1861 {
1862 	.type = AMD_IP_BLOCK_TYPE_SMC,
1863 	.major = 11,
1864 	.minor = 0,
1865 	.rev = 0,
1866 	.funcs = &smu_ip_funcs,
1867 };
1868 
1869 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
1870 {
1871 	.type = AMD_IP_BLOCK_TYPE_SMC,
1872 	.major = 12,
1873 	.minor = 0,
1874 	.rev = 0,
1875 	.funcs = &smu_ip_funcs,
1876 };
1877 
1878 int smu_load_microcode(struct smu_context *smu)
1879 {
1880 	int ret = 0;
1881 
1882 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1883 		return -EOPNOTSUPP;
1884 
1885 	mutex_lock(&smu->mutex);
1886 
1887 	if (smu->ppt_funcs->load_microcode)
1888 		ret = smu->ppt_funcs->load_microcode(smu);
1889 
1890 	mutex_unlock(&smu->mutex);
1891 
1892 	return ret;
1893 }
1894 
1895 int smu_check_fw_status(struct smu_context *smu)
1896 {
1897 	int ret = 0;
1898 
1899 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1900 		return -EOPNOTSUPP;
1901 
1902 	mutex_lock(&smu->mutex);
1903 
1904 	if (smu->ppt_funcs->check_fw_status)
1905 		ret = smu->ppt_funcs->check_fw_status(smu);
1906 
1907 	mutex_unlock(&smu->mutex);
1908 
1909 	return ret;
1910 }
1911 
1912 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
1913 {
1914 	int ret = 0;
1915 
1916 	mutex_lock(&smu->mutex);
1917 
1918 	if (smu->ppt_funcs->set_gfx_cgpg)
1919 		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
1920 
1921 	mutex_unlock(&smu->mutex);
1922 
1923 	return ret;
1924 }
1925 
1926 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
1927 {
1928 	int ret = 0;
1929 
1930 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1931 		return -EOPNOTSUPP;
1932 
1933 	mutex_lock(&smu->mutex);
1934 
1935 	if (smu->ppt_funcs->set_fan_speed_rpm)
1936 		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
1937 
1938 	mutex_unlock(&smu->mutex);
1939 
1940 	return ret;
1941 }
1942 
1943 int smu_get_power_limit(struct smu_context *smu,
1944 			uint32_t *limit,
1945 			bool max_setting)
1946 {
1947 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1948 		return -EOPNOTSUPP;
1949 
1950 	mutex_lock(&smu->mutex);
1951 
1952 	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);
1953 
1954 	mutex_unlock(&smu->mutex);
1955 
1956 	return 0;
1957 }
1958 
1959 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
1960 {
1961 	int ret = 0;
1962 
1963 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1964 		return -EOPNOTSUPP;
1965 
1966 	mutex_lock(&smu->mutex);
1967 
1968 	if (limit > smu->max_power_limit) {
1969 		dev_err(smu->adev->dev,
1970 			"New power limit (%d) is over the max allowed %d\n",
1971 			limit, smu->max_power_limit);
1972 		goto out;
1973 	}
1974 
1975 	if (!limit)
1976 		limit = smu->current_power_limit;
1977 
1978 	if (smu->ppt_funcs->set_power_limit)
1979 		ret = smu->ppt_funcs->set_power_limit(smu, limit);
1980 
1981 out:
1982 	mutex_unlock(&smu->mutex);
1983 
1984 	return ret;
1985 }
1986 
1987 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
1988 {
1989 	int ret = 0;
1990 
1991 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1992 		return -EOPNOTSUPP;
1993 
1994 	mutex_lock(&smu->mutex);
1995 
1996 	if (smu->ppt_funcs->print_clk_levels)
1997 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
1998 
1999 	mutex_unlock(&smu->mutex);
2000 
2001 	return ret;
2002 }
2003 
2004 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2005 {
2006 	int ret = 0;
2007 
2008 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2009 		return -EOPNOTSUPP;
2010 
2011 	mutex_lock(&smu->mutex);
2012 
2013 	if (smu->ppt_funcs->get_od_percentage)
2014 		ret = smu->ppt_funcs->get_od_percentage(smu, type);
2015 
2016 	mutex_unlock(&smu->mutex);
2017 
2018 	return ret;
2019 }
2020 
2021 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2022 {
2023 	int ret = 0;
2024 
2025 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2026 		return -EOPNOTSUPP;
2027 
2028 	mutex_lock(&smu->mutex);
2029 
2030 	if (smu->ppt_funcs->set_od_percentage)
2031 		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2032 
2033 	mutex_unlock(&smu->mutex);
2034 
2035 	return ret;
2036 }
2037 
2038 int smu_od_edit_dpm_table(struct smu_context *smu,
2039 			  enum PP_OD_DPM_TABLE_COMMAND type,
2040 			  long *input, uint32_t size)
2041 {
2042 	int ret = 0;
2043 
2044 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2045 		return -EOPNOTSUPP;
2046 
2047 	mutex_lock(&smu->mutex);
2048 
2049 	if (smu->ppt_funcs->od_edit_dpm_table) {
2050 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2051 		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
2052 			ret = smu_handle_task(smu,
2053 					      smu->smu_dpm.dpm_level,
2054 					      AMD_PP_TASK_READJUST_POWER_STATE,
2055 					      false);
2056 	}
2057 
2058 	mutex_unlock(&smu->mutex);
2059 
2060 	return ret;
2061 }
2062 
2063 int smu_read_sensor(struct smu_context *smu,
2064 		    enum amd_pp_sensors sensor,
2065 		    void *data, uint32_t *size)
2066 {
2067 	struct smu_umd_pstate_table *pstate_table =
2068 				&smu->pstate_table;
2069 	int ret = 0;
2070 
2071 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2072 		return -EOPNOTSUPP;
2073 
2074 	if (!data || !size)
2075 		return -EINVAL;
2076 
2077 	mutex_lock(&smu->mutex);
2078 
2079 	if (smu->ppt_funcs->read_sensor)
2080 		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2081 			goto unlock;
2082 
2083 	switch (sensor) {
2084 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2085 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2086 		*size = 4;
2087 		break;
2088 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2089 		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2090 		*size = 4;
2091 		break;
2092 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2093 		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
2094 		*size = 8;
2095 		break;
2096 	case AMDGPU_PP_SENSOR_UVD_POWER:
2097 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2098 		*size = 4;
2099 		break;
2100 	case AMDGPU_PP_SENSOR_VCE_POWER:
2101 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2102 		*size = 4;
2103 		break;
2104 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2105 		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1;
2106 		*size = 4;
2107 		break;
2108 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2109 		*(uint32_t *)data = 0;
2110 		*size = 4;
2111 		break;
2112 	default:
2113 		*size = 0;
2114 		ret = -EOPNOTSUPP;
2115 		break;
2116 	}
2117 
2118 unlock:
2119 	mutex_unlock(&smu->mutex);
2120 
2121 	return ret;
2122 }
2123 
2124 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2125 {
2126 	int ret = 0;
2127 
2128 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2129 		return -EOPNOTSUPP;
2130 
2131 	mutex_lock(&smu->mutex);
2132 
2133 	if (smu->ppt_funcs->get_power_profile_mode)
2134 		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2135 
2136 	mutex_unlock(&smu->mutex);
2137 
2138 	return ret;
2139 }
2140 
2141 int smu_set_power_profile_mode(struct smu_context *smu,
2142 			       long *param,
2143 			       uint32_t param_size,
2144 			       bool lock_needed)
2145 {
2146 	int ret = 0;
2147 
2148 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2149 		return -EOPNOTSUPP;
2150 
2151 	if (lock_needed)
2152 		mutex_lock(&smu->mutex);
2153 
2154 	if (smu->ppt_funcs->set_power_profile_mode)
2155 		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2156 
2157 	if (lock_needed)
2158 		mutex_unlock(&smu->mutex);
2159 
2160 	return ret;
2161 }
2162 
2163 
2164 int smu_get_fan_control_mode(struct smu_context *smu)
2165 {
2166 	int ret = 0;
2167 
2168 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2169 		return -EOPNOTSUPP;
2170 
2171 	mutex_lock(&smu->mutex);
2172 
2173 	if (smu->ppt_funcs->get_fan_control_mode)
2174 		ret = smu->ppt_funcs->get_fan_control_mode(smu);
2175 
2176 	mutex_unlock(&smu->mutex);
2177 
2178 	return ret;
2179 }
2180 
2181 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2182 {
2183 	int ret = 0;
2184 
2185 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2186 		return -EOPNOTSUPP;
2187 
2188 	mutex_lock(&smu->mutex);
2189 
2190 	if (smu->ppt_funcs->set_fan_control_mode)
2191 		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2192 
2193 	mutex_unlock(&smu->mutex);
2194 
2195 	return ret;
2196 }
2197 
2198 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2199 {
2200 	int ret = 0;
2201 	uint32_t percent;
2202 	uint32_t current_rpm;
2203 
2204 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2205 		return -EOPNOTSUPP;
2206 
2207 	mutex_lock(&smu->mutex);
2208 
2209 	if (smu->ppt_funcs->get_fan_speed_rpm) {
2210 		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
2211 		if (!ret) {
2212 			percent = current_rpm * 100 / smu->fan_max_rpm;
2213 			*speed = percent > 100 ? 100 : percent;
2214 		}
2215 	}
2216 
2217 	mutex_unlock(&smu->mutex);
2218 
2219 
2220 	return ret;
2221 }
2222 
2223 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2224 {
2225 	int ret = 0;
2226 	uint32_t rpm;
2227 
2228 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2229 		return -EOPNOTSUPP;
2230 
2231 	mutex_lock(&smu->mutex);
2232 
2233 	if (smu->ppt_funcs->set_fan_speed_rpm) {
2234 		if (speed > 100)
2235 			speed = 100;
2236 		rpm = speed * smu->fan_max_rpm / 100;
2237 		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
2238 	}
2239 
2240 	mutex_unlock(&smu->mutex);
2241 
2242 	return ret;
2243 }
2244 
2245 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2246 {
2247 	int ret = 0;
2248 
2249 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2250 		return -EOPNOTSUPP;
2251 
2252 	mutex_lock(&smu->mutex);
2253 
2254 	if (smu->ppt_funcs->get_fan_speed_rpm)
2255 		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2256 
2257 	mutex_unlock(&smu->mutex);
2258 
2259 	return ret;
2260 }
2261 
2262 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2263 {
2264 	int ret = 0;
2265 
2266 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2267 		return -EOPNOTSUPP;
2268 
2269 	mutex_lock(&smu->mutex);
2270 
2271 	ret = smu_set_min_dcef_deep_sleep(smu, clk);
2272 
2273 	mutex_unlock(&smu->mutex);
2274 
2275 	return ret;
2276 }
2277 
2278 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2279 {
2280 	int ret = 0;
2281 
2282 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2283 		return -EOPNOTSUPP;
2284 
2285 	if (smu->ppt_funcs->set_active_display_count)
2286 		ret = smu->ppt_funcs->set_active_display_count(smu, count);
2287 
2288 	return ret;
2289 }
2290 
2291 int smu_get_clock_by_type(struct smu_context *smu,
2292 			  enum amd_pp_clock_type type,
2293 			  struct amd_pp_clocks *clocks)
2294 {
2295 	int ret = 0;
2296 
2297 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2298 		return -EOPNOTSUPP;
2299 
2300 	mutex_lock(&smu->mutex);
2301 
2302 	if (smu->ppt_funcs->get_clock_by_type)
2303 		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2304 
2305 	mutex_unlock(&smu->mutex);
2306 
2307 	return ret;
2308 }
2309 
2310 int smu_get_max_high_clocks(struct smu_context *smu,
2311 			    struct amd_pp_simple_clock_info *clocks)
2312 {
2313 	int ret = 0;
2314 
2315 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2316 		return -EOPNOTSUPP;
2317 
2318 	mutex_lock(&smu->mutex);
2319 
2320 	if (smu->ppt_funcs->get_max_high_clocks)
2321 		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2322 
2323 	mutex_unlock(&smu->mutex);
2324 
2325 	return ret;
2326 }
2327 
2328 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2329 				       enum smu_clk_type clk_type,
2330 				       struct pp_clock_levels_with_latency *clocks)
2331 {
2332 	int ret = 0;
2333 
2334 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2335 		return -EOPNOTSUPP;
2336 
2337 	mutex_lock(&smu->mutex);
2338 
2339 	if (smu->ppt_funcs->get_clock_by_type_with_latency)
2340 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2341 
2342 	mutex_unlock(&smu->mutex);
2343 
2344 	return ret;
2345 }
2346 
2347 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2348 				       enum amd_pp_clock_type type,
2349 				       struct pp_clock_levels_with_voltage *clocks)
2350 {
2351 	int ret = 0;
2352 
2353 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2354 		return -EOPNOTSUPP;
2355 
2356 	mutex_lock(&smu->mutex);
2357 
2358 	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2359 		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2360 
2361 	mutex_unlock(&smu->mutex);
2362 
2363 	return ret;
2364 }
2365 
2366 
2367 int smu_display_clock_voltage_request(struct smu_context *smu,
2368 				      struct pp_display_clock_request *clock_req)
2369 {
2370 	int ret = 0;
2371 
2372 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2373 		return -EOPNOTSUPP;
2374 
2375 	mutex_lock(&smu->mutex);
2376 
2377 	if (smu->ppt_funcs->display_clock_voltage_request)
2378 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2379 
2380 	mutex_unlock(&smu->mutex);
2381 
2382 	return ret;
2383 }
2384 
2385 
2386 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2387 {
2388 	int ret = -EINVAL;
2389 
2390 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2391 		return -EOPNOTSUPP;
2392 
2393 	mutex_lock(&smu->mutex);
2394 
2395 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
2396 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2397 
2398 	mutex_unlock(&smu->mutex);
2399 
2400 	return ret;
2401 }
2402 
2403 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2404 {
2405 	int ret = 0;
2406 
2407 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2408 		return -EOPNOTSUPP;
2409 
2410 	mutex_lock(&smu->mutex);
2411 
2412 	if (smu->ppt_funcs->notify_smu_enable_pwe)
2413 		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2414 
2415 	mutex_unlock(&smu->mutex);
2416 
2417 	return ret;
2418 }
2419 
2420 int smu_set_xgmi_pstate(struct smu_context *smu,
2421 			uint32_t pstate)
2422 {
2423 	int ret = 0;
2424 
2425 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2426 		return -EOPNOTSUPP;
2427 
2428 	mutex_lock(&smu->mutex);
2429 
2430 	if (smu->ppt_funcs->set_xgmi_pstate)
2431 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2432 
2433 	mutex_unlock(&smu->mutex);
2434 
2435 	if(ret)
2436 		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2437 
2438 	return ret;
2439 }
2440 
2441 int smu_set_azalia_d3_pme(struct smu_context *smu)
2442 {
2443 	int ret = 0;
2444 
2445 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2446 		return -EOPNOTSUPP;
2447 
2448 	mutex_lock(&smu->mutex);
2449 
2450 	if (smu->ppt_funcs->set_azalia_d3_pme)
2451 		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2452 
2453 	mutex_unlock(&smu->mutex);
2454 
2455 	return ret;
2456 }
2457 
2458 /*
2459  * On system suspending or resetting, the dpm_enabled
2460  * flag will be cleared. So that those SMU services which
2461  * are not supported will be gated.
2462  *
2463  * However, the baco/mode1 reset should still be granted
2464  * as they are still supported and necessary.
2465  */
2466 bool smu_baco_is_support(struct smu_context *smu)
2467 {
2468 	bool ret = false;
2469 
2470 	if (!smu->pm_enabled)
2471 		return false;
2472 
2473 	mutex_lock(&smu->mutex);
2474 
2475 	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2476 		ret = smu->ppt_funcs->baco_is_support(smu);
2477 
2478 	mutex_unlock(&smu->mutex);
2479 
2480 	return ret;
2481 }
2482 
2483 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2484 {
2485 	if (smu->ppt_funcs->baco_get_state)
2486 		return -EINVAL;
2487 
2488 	mutex_lock(&smu->mutex);
2489 	*state = smu->ppt_funcs->baco_get_state(smu);
2490 	mutex_unlock(&smu->mutex);
2491 
2492 	return 0;
2493 }
2494 
2495 int smu_baco_enter(struct smu_context *smu)
2496 {
2497 	int ret = 0;
2498 
2499 	if (!smu->pm_enabled)
2500 		return -EOPNOTSUPP;
2501 
2502 	mutex_lock(&smu->mutex);
2503 
2504 	if (smu->ppt_funcs->baco_enter)
2505 		ret = smu->ppt_funcs->baco_enter(smu);
2506 
2507 	mutex_unlock(&smu->mutex);
2508 
2509 	if (ret)
2510 		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
2511 
2512 	return ret;
2513 }
2514 
2515 int smu_baco_exit(struct smu_context *smu)
2516 {
2517 	int ret = 0;
2518 
2519 	if (!smu->pm_enabled)
2520 		return -EOPNOTSUPP;
2521 
2522 	mutex_lock(&smu->mutex);
2523 
2524 	if (smu->ppt_funcs->baco_exit)
2525 		ret = smu->ppt_funcs->baco_exit(smu);
2526 
2527 	mutex_unlock(&smu->mutex);
2528 
2529 	if (ret)
2530 		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
2531 
2532 	return ret;
2533 }
2534 
2535 bool smu_mode1_reset_is_support(struct smu_context *smu)
2536 {
2537 	bool ret = false;
2538 
2539 	if (!smu->pm_enabled)
2540 		return false;
2541 
2542 	mutex_lock(&smu->mutex);
2543 
2544 	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2545 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2546 
2547 	mutex_unlock(&smu->mutex);
2548 
2549 	return ret;
2550 }
2551 
2552 int smu_mode1_reset(struct smu_context *smu)
2553 {
2554 	int ret = 0;
2555 
2556 	if (!smu->pm_enabled)
2557 		return -EOPNOTSUPP;
2558 
2559 	mutex_lock(&smu->mutex);
2560 
2561 	if (smu->ppt_funcs->mode1_reset)
2562 		ret = smu->ppt_funcs->mode1_reset(smu);
2563 
2564 	mutex_unlock(&smu->mutex);
2565 
2566 	return ret;
2567 }
2568 
2569 int smu_mode2_reset(struct smu_context *smu)
2570 {
2571 	int ret = 0;
2572 
2573 	if (!smu->pm_enabled)
2574 		return -EOPNOTSUPP;
2575 
2576 	mutex_lock(&smu->mutex);
2577 
2578 	if (smu->ppt_funcs->mode2_reset)
2579 		ret = smu->ppt_funcs->mode2_reset(smu);
2580 
2581 	mutex_unlock(&smu->mutex);
2582 
2583 	if (ret)
2584 		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2585 
2586 	return ret;
2587 }
2588 
2589 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2590 					 struct pp_smu_nv_clock_table *max_clocks)
2591 {
2592 	int ret = 0;
2593 
2594 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2595 		return -EOPNOTSUPP;
2596 
2597 	mutex_lock(&smu->mutex);
2598 
2599 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2600 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2601 
2602 	mutex_unlock(&smu->mutex);
2603 
2604 	return ret;
2605 }
2606 
2607 int smu_get_uclk_dpm_states(struct smu_context *smu,
2608 			    unsigned int *clock_values_in_khz,
2609 			    unsigned int *num_states)
2610 {
2611 	int ret = 0;
2612 
2613 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2614 		return -EOPNOTSUPP;
2615 
2616 	mutex_lock(&smu->mutex);
2617 
2618 	if (smu->ppt_funcs->get_uclk_dpm_states)
2619 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2620 
2621 	mutex_unlock(&smu->mutex);
2622 
2623 	return ret;
2624 }
2625 
2626 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2627 {
2628 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2629 
2630 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2631 		return -EOPNOTSUPP;
2632 
2633 	mutex_lock(&smu->mutex);
2634 
2635 	if (smu->ppt_funcs->get_current_power_state)
2636 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
2637 
2638 	mutex_unlock(&smu->mutex);
2639 
2640 	return pm_state;
2641 }
2642 
2643 int smu_get_dpm_clock_table(struct smu_context *smu,
2644 			    struct dpm_clocks *clock_table)
2645 {
2646 	int ret = 0;
2647 
2648 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2649 		return -EOPNOTSUPP;
2650 
2651 	mutex_lock(&smu->mutex);
2652 
2653 	if (smu->ppt_funcs->get_dpm_clock_table)
2654 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2655 
2656 	mutex_unlock(&smu->mutex);
2657 
2658 	return ret;
2659 }
2660 
2661 ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
2662 				void **table)
2663 {
2664 	ssize_t size;
2665 
2666 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2667 		return -EOPNOTSUPP;
2668 
2669 	if (!smu->ppt_funcs->get_gpu_metrics)
2670 		return -EOPNOTSUPP;
2671 
2672 	mutex_lock(&smu->mutex);
2673 
2674 	size = smu->ppt_funcs->get_gpu_metrics(smu, table);
2675 
2676 	mutex_unlock(&smu->mutex);
2677 
2678 	return size;
2679 }
2680 
2681 int smu_enable_mgpu_fan_boost(struct smu_context *smu)
2682 {
2683 	int ret = 0;
2684 
2685 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2686 		return -EOPNOTSUPP;
2687 
2688 	mutex_lock(&smu->mutex);
2689 
2690 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
2691 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
2692 
2693 	mutex_unlock(&smu->mutex);
2694 
2695 	return ret;
2696 }
2697