/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

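/*
 * Every CGS callback receives an opaque struct cgs_device handle; this
 * helper resolves it back to the embedding amdgpu_device so the callback
 * bodies can use the usual adev accessors and register macros.
 */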
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG_SE_CAC:
		return RREG32_SE_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG_SE_CAC:
		return WREG32_SE_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
								(void *)adev,
									state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
								(void *)adev,
									state);
			break;
		}
	}
	return r;
}

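/*
 * Map a CGS firmware type onto the driver's AMDGPU_UCODE_ID enum so the
 * matching entry in adev->firmware.ucode[] can be looked up. Returns
 * AMDGPU_UCODE_ID_MAXIMUM for types that have no mapping.
 */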
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* For VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same firmware.
		 * 2. JT2 is not patched, but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	if (type == CGS_UCODE_ID_SMU || type == CGS_UCODE_ID_SMU_SK) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because it is not loaded by CGS */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
					bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
		adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}

static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
					bool lock)
{
	CGS_FUNC_ADEV;

	if (lock)
		mutex_lock(&adev->grbm_idx_mutex);
	else
		mutex_unlock(&adev->grbm_idx_mutex);
}

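/*
 * Hand firmware details back to the caller (typically the powerplay code).
 * For non-SMU types this only reports on ucode that amdgpu has already
 * loaded; for the SMU/SMU_SK types it also requests the SMC firmware image
 * from userspace on first use and caches it in adev->pm.fw.
 */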
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (type != CGS_UCODE_ID_SMU && type != CGS_UCODE_ID_SMU_SK) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (type == CGS_UCODE_ID_CP_MEC)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TAHITI:
				strcpy(fw_name, "radeon/tahiti_smc.bin");
				break;
			case CHIP_PITCAIRN:
				if ((adev->pdev->revision == 0x81) &&
				    ((adev->pdev->device == 0x6810) ||
				    (adev->pdev->device == 0x6811))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/pitcairn_smc.bin");
				}
				break;
			case CHIP_VERDE:
				if (((adev->pdev->device == 0x6820) &&
					((adev->pdev->revision == 0x81) ||
					(adev->pdev->revision == 0x83))) ||
				    ((adev->pdev->device == 0x6821) &&
					((adev->pdev->revision == 0x83) ||
					(adev->pdev->revision == 0x87))) ||
				    ((adev->pdev->revision == 0x87) &&
					((adev->pdev->device == 0x6823) ||
					(adev->pdev->device == 0x682b)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/verde_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/verde_smc.bin");
				}
				break;
			case CHIP_OLAND:
				if (((adev->pdev->revision == 0x81) &&
					((adev->pdev->device == 0x6600) ||
					(adev->pdev->device == 0x6604) ||
					(adev->pdev->device == 0x6605) ||
					(adev->pdev->device == 0x6610))) ||
				    ((adev->pdev->revision == 0x83) &&
					(adev->pdev->device == 0x6610))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/oland_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/oland_smc.bin");
				}
				break;
			case CHIP_HAINAN:
				if (((adev->pdev->revision == 0x81) &&
					(adev->pdev->device == 0x6660)) ||
				    ((adev->pdev->revision == 0x83) &&
					((adev->pdev->device == 0x6660) ||
					(adev->pdev->device == 0x6663) ||
					(adev->pdev->device == 0x6665) ||
					(adev->pdev->device == 0x6667)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hainan_k_smc.bin");
				} else if ((adev->pdev->revision == 0xc3) &&
					(adev->pdev->device == 0x6665)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/banks_k_2_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hainan_smc.bin");
				}
				break;
			case CHIP_BONAIRE:
				if ((adev->pdev->revision == 0x80) ||
					(adev->pdev->revision == 0x81) ||
					(adev->pdev->device == 0x665f)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/bonaire_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/bonaire_smc.bin");
				}
				break;
			case CHIP_HAWAII:
				if (adev->pdev->revision == 0x80) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hawaii_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hawaii_smc.bin");
				}
				break;
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
					((adev->pdev->revision == 0xc0) ||
					(adev->pdev->revision == 0xc1) ||
					(adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			case CHIP_VEGA12:
				strcpy(fw_name, "amdgpu/vega12_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}

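/*
 * Report the active CRTC mask, display count and per-mode vblank timing.
 * Without DC support the values are derived from the enabled CRTCs'
 * hardware modes; with DC they come from adev->pm.pm_display_cfg.
 */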
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					  struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info)
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;

	if (!amdgpu_device_has_dc_support(adev)) {
		struct amdgpu_crtc *amdgpu_crtc;
		struct drm_device *ddev = adev->ddev;
		struct drm_crtc *crtc;
		uint32_t line_time_us, vblank_lines;

		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					&ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
					info->display_count++;
				}
				if (mode_info != NULL &&
					crtc->enabled && amdgpu_crtc->enabled &&
					amdgpu_crtc->hw_mode.clock) {
					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
								amdgpu_crtc->hw_mode.clock;
					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
								amdgpu_crtc->hw_mode.crtc_vdisplay +
								(amdgpu_crtc->v_border * 2);
					mode_info->vblank_time_us = vblank_lines * line_time_us;
					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
					/* we have issues with mclk switching with refresh rates
					 * over 120 hz on the non-DC code.
					 */
					if (mode_info->refresh_rate > 120)
						mode_info->vblank_time_us = 0;
					mode_info = NULL;
				}
			}
		}
	} else {
		info->display_count = adev->pm.pm_display_cfg.num_display;
		if (mode_info != NULL) {
			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
		}
	}
	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

static const struct cgs_ops amdgpu_cgs_ops = {
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};

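/*
 * Allocate a CGS handle that wraps @adev. The caller owns the returned
 * pointer and must release it with amdgpu_cgs_destroy_device().
 */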
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}