/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

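/*
 * Wrapper object handed out as the opaque CGS device: it embeds the
 * cgs_device base expected by the CGS core and the owning amdgpu_device,
 * which the CGS_FUNC_ADEV macro below recovers from a cgs_device pointer.
 */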
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

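/*
 * amdgpu_cgs_alloc_gpu_mem - allocate a buffer object for a CGS client
 *
 * Translates the CGS memory type into an amdgpu domain/flags pair plus a
 * restricted TTM placement covering [min_offset, max_offset), and returns
 * the resulting amdgpu_bo cast to an opaque cgs_handle_t.
 */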
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint64_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (WARN_ON(min_offset > max_offset))
		return -EINVAL;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
		} else {
			/* all of VRAM is CPU visible: fall back to the
			 * caller's range so 'place' is never left
			 * uninitialized
			 */
			place.fpfn = min_offset >> PAGE_SHIFT;
			place.lpfn = max_offset >> PAGE_SHIFT;
		}
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  0, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

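/*
 * Tear down a buffer previously allocated through amdgpu_cgs_alloc_gpu_mem:
 * unmap and unpin it if it can still be reserved, then drop the reference.
 */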
static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, true);

		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

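/*
 * Pin the buffer into its preferred domain, honouring the placement range
 * chosen at allocation time, and return the resulting GPU (MC) address.
 */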
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	/* widen before shifting so large page-frame numbers don't truncate */
	min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}
174 
175 static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
176 {
177 	int r;
178 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
179 	r = amdgpu_bo_reserve(obj, true);
180 	if (unlikely(r != 0))
181 		return r;
182 	r = amdgpu_bo_unpin(obj);
183 	amdgpu_bo_unreserve(obj);
184 	return r;
185 }
186 
187 static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
188 				   void **map)
189 {
190 	int r;
191 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
192 	r = amdgpu_bo_reserve(obj, true);
193 	if (unlikely(r != 0))
194 		return r;
195 	r = amdgpu_bo_kmap(obj, map);
196 	amdgpu_bo_unreserve(obj);
197 	return r;
198 }
199 
200 static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
201 {
202 	int r;
203 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
204 	r = amdgpu_bo_reserve(obj, true);
205 	if (unlikely(r != 0))
206 		return r;
207 	amdgpu_bo_kunmap(obj);
208 	amdgpu_bo_unreserve(obj);
209 	return r;
210 }
211 
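/*
 * MMIO and indirect register accessors. Offsets and indices are forwarded
 * unchanged to the amdgpu RREG32/WREG32 family of macros.
 */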
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;

	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;

	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;

	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG_SE_CAC:
		return RREG32_SE_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;

	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG_SE_CAC:
		return WREG32_SE_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

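/*
 * Report the base address of a PCI-backed resource, validating that the
 * requested [offset, offset + size) window fits inside it.
 */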
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

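/*
 * Per-source bookkeeping for interrupt sources registered through CGS;
 * stashed in amdgpu_irq_src.data so the callbacks below can recover it.
 */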
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

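/*
 * Register a new interrupt source with the amdgpu IRQ core on behalf of a
 * CGS client; 'set' and 'handler' are forwarded via cgs_irq_funcs above.
 */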
static int amdgpu_cgs_add_irq_source(void *cgs_device,
				     unsigned client_id,
				     unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;
	irq_params = kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}
432 
433 static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
434 			      unsigned src_id, unsigned type)
435 {
436 	CGS_FUNC_ADEV;
437 
438 	if (!adev->irq.client[client_id].sources)
439 		return -EINVAL;
440 
441 	return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
442 }
443 
444 static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
445 			      unsigned src_id, unsigned type)
446 {
447 	CGS_FUNC_ADEV;
448 
449 	if (!adev->irq.client[client_id].sources)
450 		return -EINVAL;
451 
452 	return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
453 }
454 
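/*
 * Forward a clockgating or powergating request to the IP block of the
 * given type; returns the block's own result, or -EINVAL when no valid
 * block of that type is present.
 */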
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -EINVAL;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
				(void *)adev, state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -EINVAL;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
				(void *)adev, state);
			break;
		}
	}
	return r;
}

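/* Map a CGS firmware id onto the corresponding AMDGPU_UCODE_ID. */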
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* for VI. JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched, but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

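/* Release firmware owned by CGS; only the SMU image is requested by CGS itself. */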
static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if (type == CGS_UCODE_ID_SMU || type == CGS_UCODE_ID_SMU_SK) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware here: it was not requested by CGS */
	return -EINVAL;
}

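/* Look up the version of the loaded firmware image for the given ucode id. */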
static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					    enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

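/*
 * Enter or leave RLC safe mode around register updates; a no-op when the
 * RLC funcs do not provide safe-mode hooks.
 */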
static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
				      bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}

static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
				     bool lock)
{
	CGS_FUNC_ADEV;

	if (lock)
		mutex_lock(&adev->grbm_idx_mutex);
	else
		mutex_unlock(&adev->grbm_idx_mutex);
}

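/*
 * amdgpu_cgs_get_firmware_info - hand firmware details to a CGS client
 *
 * For non-SMU ucode the image must already be loaded and is described from
 * adev->firmware.ucode[]; for SMU/SMU_SK ucode the SMC image is requested
 * on first use, picking a chip (and possibly "kicker") specific file name.
 */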
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (type != CGS_UCODE_ID_SMU && type != CGS_UCODE_ID_SMU_SK) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (type == CGS_UCODE_ID_CP_MEC)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
				    ((adev->pdev->revision == 0xc0) ||
				     (adev->pdev->revision == 0xc1) ||
				     (adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware %s\n", fw_name);
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;

	return amdgpu_sriov_vf(adev);
}

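/* Answer simple scalar queries (BDF, PCIe caps, CG/PG flags, CU/SE counts). */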
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (sys_info == NULL)
		return -ENODEV;

	if (sys_info->size != sizeof(struct cgs_system_info))
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

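/*
 * Collect the active CRTC mask and, for the first enabled CRTC with a
 * valid hw mode, the vblank time, refresh rate, and reference clock DPM
 * needs.
 */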
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info) {
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;
		/* always set the reference clock */
		mode_info->ref_clock = adev->clock.spll.reference_freq;
	}

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
						amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
						amdgpu_crtc->hw_mode.crtc_vdisplay +
						(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				/* only fill mode_info once, for the first live CRTC */
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief Evaluate an ACPI namespace object; the handle or pathname must be valid.
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	char name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kcalloc(input.count, sizeof(union acpi_object), GFP_KERNEL);
		if (params == NULL)
			return -ENOMEM;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else {
		params = obj;
	}

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

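/*
 * Convenience wrapper: evaluate an ACPI method that takes a function id
 * plus an input buffer and returns a single output buffer.
 */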
static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

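/* Dispatch tables wiring the CGS interface to the amdgpu implementations above. */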
static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.call_acpi_method = amdgpu_cgs_call_acpi_method,
	.query_system_info = amdgpu_cgs_query_system_info,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	.add_irq_source = amdgpu_cgs_add_irq_source,
	.irq_get = amdgpu_cgs_irq_get,
	.irq_put = amdgpu_cgs_irq_put
};

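/*
 * amdgpu_cgs_create_device - create a CGS device bound to an amdgpu device
 *
 * The returned object embeds struct cgs_device as its first member, so the
 * CGS core can hand it back unchanged to the callbacks above.
 */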
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return &cgs_device->base;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}