1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/kthread.h>
29 #include <linux/console.h>
30 #include <linux/slab.h>
31 #include <linux/debugfs.h>
32 #include <drm/drmP.h>
33 #include <drm/drm_crtc_helper.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/amdgpu_drm.h>
36 #include <linux/vgaarb.h>
37 #include <linux/vga_switcheroo.h>
38 #include <linux/efi.h>
39 #include "amdgpu.h"
40 #include "amdgpu_trace.h"
41 #include "amdgpu_i2c.h"
42 #include "atom.h"
43 #include "amdgpu_atombios.h"
44 #include "amdgpu_atomfirmware.h"
45 #include "amd_pcie.h"
46 #ifdef CONFIG_DRM_AMDGPU_SI
47 #include "si.h"
48 #endif
49 #ifdef CONFIG_DRM_AMDGPU_CIK
50 #include "cik.h"
51 #endif
52 #include "vi.h"
53 #include "soc15.h"
54 #include "bif/bif_4_1_d.h"
55 #include <linux/pci.h>
56 #include <linux/firmware.h>
57 #include "amdgpu_vf_error.h"
58 
59 #include "amdgpu_amdkfd.h"
60 #include "amdgpu_pm.h"
61 
62 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
63 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
64 
65 #define AMDGPU_RESUME_MS		2000
66 
67 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
68 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
69 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
70 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
71 
72 static const char *amdgpu_asic_name[] = {
73 	"TAHITI",
74 	"PITCAIRN",
75 	"VERDE",
76 	"OLAND",
77 	"HAINAN",
78 	"BONAIRE",
79 	"KAVERI",
80 	"KABINI",
81 	"HAWAII",
82 	"MULLINS",
83 	"TOPAZ",
84 	"TONGA",
85 	"FIJI",
86 	"CARRIZO",
87 	"STONEY",
88 	"POLARIS10",
89 	"POLARIS11",
90 	"POLARIS12",
91 	"VEGA10",
92 	"RAVEN",
93 	"LAST",
94 };
95 
96 bool amdgpu_device_is_px(struct drm_device *dev)
97 {
98 	struct amdgpu_device *adev = dev->dev_private;
99 
100 	if (adev->flags & AMD_IS_PX)
101 		return true;
102 	return false;
103 }
104 
105 /*
106  * MMIO register access helper functions.
107  */
108 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
109 			uint32_t acc_flags)
110 {
111 	uint32_t ret;
112 
113 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
114 		return amdgpu_virt_kiq_rreg(adev, reg);
115 
116 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
117 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
118 	else {
119 		unsigned long flags;
120 
121 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
122 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
123 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
124 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
125 	}
126 	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
127 	return ret;
128 }
129 
130 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
131 		    uint32_t acc_flags)
132 {
133 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
134 
135 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
136 		adev->last_mm_index = v;
137 	}
138 
139 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
140 		return amdgpu_virt_kiq_wreg(adev, reg, v);
141 
142 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
143 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
144 	else {
145 		unsigned long flags;
146 
147 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
148 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
149 		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
150 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
151 	}
152 
153 	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
154 		udelay(500);
155 	}
156 }
157 
158 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
159 {
160 	if ((reg * 4) < adev->rio_mem_size)
161 		return ioread32(adev->rio_mem + (reg * 4));
162 	else {
163 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
164 		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
165 	}
166 }
167 
168 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
169 {
170 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
171 		adev->last_mm_index = v;
172 	}
173 
174 	if ((reg * 4) < adev->rio_mem_size)
175 		iowrite32(v, adev->rio_mem + (reg * 4));
176 	else {
177 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
178 		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
179 	}
180 
181 	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
182 		udelay(500);
183 	}
184 }
185 
186 /**
187  * amdgpu_mm_rdoorbell - read a doorbell dword
188  *
189  * @adev: amdgpu_device pointer
190  * @index: doorbell index
191  *
192  * Returns the value in the doorbell aperture at the
193  * requested doorbell index (CIK).
194  */
195 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
196 {
197 	if (index < adev->doorbell.num_doorbells) {
198 		return readl(adev->doorbell.ptr + index);
199 	} else {
200 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
201 		return 0;
202 	}
203 }
204 
205 /**
206  * amdgpu_mm_wdoorbell - write a doorbell dword
207  *
208  * @adev: amdgpu_device pointer
209  * @index: doorbell index
210  * @v: value to write
211  *
212  * Writes @v to the doorbell aperture at the
213  * requested doorbell index (CIK).
214  */
215 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
216 {
217 	if (index < adev->doorbell.num_doorbells) {
218 		writel(v, adev->doorbell.ptr + index);
219 	} else {
220 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
221 	}
222 }
223 
224 /**
225  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
226  *
227  * @adev: amdgpu_device pointer
228  * @index: doorbell index
229  *
230  * Returns the value in the doorbell aperture at the
231  * requested doorbell index (VEGA10+).
232  */
233 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
234 {
235 	if (index < adev->doorbell.num_doorbells) {
236 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
237 	} else {
238 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
239 		return 0;
240 	}
241 }
242 
243 /**
244  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
245  *
246  * @adev: amdgpu_device pointer
247  * @index: doorbell index
248  * @v: value to write
249  *
250  * Writes @v to the doorbell aperture at the
251  * requested doorbell index (VEGA10+).
252  */
253 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
254 {
255 	if (index < adev->doorbell.num_doorbells) {
256 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
257 	} else {
258 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
259 	}
260 }
261 
262 /**
263  * amdgpu_invalid_rreg - dummy reg read function
264  *
265  * @adev: amdgpu device pointer
266  * @reg: offset of register
267  *
268  * Dummy register read function.  Used for register blocks
269  * that certain asics don't have (all asics).
270  * Returns the value in the register.
271  */
272 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
273 {
274 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
275 	BUG();
276 	return 0;
277 }
278 
279 /**
280  * amdgpu_invalid_wreg - dummy reg write function
281  *
282  * @adev: amdgpu device pointer
283  * @reg: offset of register
284  * @v: value to write to the register
285  *
286  * Dummy register read function.  Used for register blocks
287  * that certain asics don't have (all asics).
288  */
289 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
290 {
291 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
292 		  reg, v);
293 	BUG();
294 }
295 
296 /**
297  * amdgpu_block_invalid_rreg - dummy reg read function
298  *
299  * @adev: amdgpu device pointer
300  * @block: offset of instance
301  * @reg: offset of register
302  *
303  * Dummy register read function.  Used for register blocks
304  * that certain asics don't have (all asics).
305  * Returns the value in the register.
306  */
307 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
308 					  uint32_t block, uint32_t reg)
309 {
310 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
311 		  reg, block);
312 	BUG();
313 	return 0;
314 }
315 
316 /**
317  * amdgpu_block_invalid_wreg - dummy reg write function
318  *
319  * @adev: amdgpu device pointer
320  * @block: offset of instance
321  * @reg: offset of register
322  * @v: value to write to the register
323  *
324  * Dummy register read function.  Used for register blocks
325  * that certain asics don't have (all asics).
326  */
327 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
328 				      uint32_t block,
329 				      uint32_t reg, uint32_t v)
330 {
331 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
332 		  reg, block, v);
333 	BUG();
334 }
335 
336 static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
337 {
338 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
339 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
340 				       &adev->vram_scratch.robj,
341 				       &adev->vram_scratch.gpu_addr,
342 				       (void **)&adev->vram_scratch.ptr);
343 }
344 
345 static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
346 {
347 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
348 }
349 
350 /**
351  * amdgpu_program_register_sequence - program an array of registers.
352  *
353  * @adev: amdgpu_device pointer
354  * @registers: pointer to the register array
355  * @array_size: size of the register array
356  *
357  * Programs an array or registers with and and or masks.
358  * This is a helper for setting golden registers.
359  */
360 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
361 				      const u32 *registers,
362 				      const u32 array_size)
363 {
364 	u32 tmp, reg, and_mask, or_mask;
365 	int i;
366 
367 	if (array_size % 3)
368 		return;
369 
370 	for (i = 0; i < array_size; i +=3) {
371 		reg = registers[i + 0];
372 		and_mask = registers[i + 1];
373 		or_mask = registers[i + 2];
374 
375 		if (and_mask == 0xffffffff) {
376 			tmp = or_mask;
377 		} else {
378 			tmp = RREG32(reg);
379 			tmp &= ~and_mask;
380 			tmp |= or_mask;
381 		}
382 		WREG32(reg, tmp);
383 	}
384 }
385 
386 void amdgpu_pci_config_reset(struct amdgpu_device *adev)
387 {
388 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
389 }
390 
391 /*
392  * GPU doorbell aperture helpers function.
393  */
394 /**
395  * amdgpu_doorbell_init - Init doorbell driver information.
396  *
397  * @adev: amdgpu_device pointer
398  *
399  * Init doorbell driver information (CIK)
400  * Returns 0 on success, error on failure.
401  */
402 static int amdgpu_doorbell_init(struct amdgpu_device *adev)
403 {
404 	/* No doorbell on SI hardware generation */
405 	if (adev->asic_type < CHIP_BONAIRE) {
406 		adev->doorbell.base = 0;
407 		adev->doorbell.size = 0;
408 		adev->doorbell.num_doorbells = 0;
409 		adev->doorbell.ptr = NULL;
410 		return 0;
411 	}
412 
413 	/* doorbell bar mapping */
414 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
415 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
416 
417 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
418 					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
419 	if (adev->doorbell.num_doorbells == 0)
420 		return -EINVAL;
421 
422 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
423 				     adev->doorbell.num_doorbells *
424 				     sizeof(u32));
425 	if (adev->doorbell.ptr == NULL)
426 		return -ENOMEM;
427 
428 	return 0;
429 }
430 
431 /**
432  * amdgpu_doorbell_fini - Tear down doorbell driver information.
433  *
434  * @adev: amdgpu_device pointer
435  *
436  * Tear down doorbell driver information (CIK)
437  */
438 static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
439 {
440 	iounmap(adev->doorbell.ptr);
441 	adev->doorbell.ptr = NULL;
442 }
443 
444 /**
445  * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
446  *                                setup amdkfd
447  *
448  * @adev: amdgpu_device pointer
449  * @aperture_base: output returning doorbell aperture base physical address
450  * @aperture_size: output returning doorbell aperture size in bytes
451  * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
452  *
453  * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
454  * takes doorbells required for its own rings and reports the setup to amdkfd.
455  * amdgpu reserved doorbells are at the start of the doorbell aperture.
456  */
457 void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
458 				phys_addr_t *aperture_base,
459 				size_t *aperture_size,
460 				size_t *start_offset)
461 {
462 	/*
463 	 * The first num_doorbells are used by amdgpu.
464 	 * amdkfd takes whatever's left in the aperture.
465 	 */
466 	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
467 		*aperture_base = adev->doorbell.base;
468 		*aperture_size = adev->doorbell.size;
469 		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
470 	} else {
471 		*aperture_base = 0;
472 		*aperture_size = 0;
473 		*start_offset = 0;
474 	}
475 }
476 
477 /*
478  * amdgpu_wb_*()
479  * Writeback is the method by which the GPU updates special pages in memory
480  * with the status of certain GPU events (fences, ring pointers,etc.).
481  */
482 
483 /**
484  * amdgpu_wb_fini - Disable Writeback and free memory
485  *
486  * @adev: amdgpu_device pointer
487  *
488  * Disables Writeback and frees the Writeback memory (all asics).
489  * Used at driver shutdown.
490  */
491 static void amdgpu_wb_fini(struct amdgpu_device *adev)
492 {
493 	if (adev->wb.wb_obj) {
494 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
495 				      &adev->wb.gpu_addr,
496 				      (void **)&adev->wb.wb);
497 		adev->wb.wb_obj = NULL;
498 	}
499 }
500 
501 /**
502  * amdgpu_wb_init- Init Writeback driver info and allocate memory
503  *
504  * @adev: amdgpu_device pointer
505  *
506  * Initializes writeback and allocates writeback memory (all asics).
507  * Used at driver startup.
508  * Returns 0 on success or an -error on failure.
509  */
510 static int amdgpu_wb_init(struct amdgpu_device *adev)
511 {
512 	int r;
513 
514 	if (adev->wb.wb_obj == NULL) {
515 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
516 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
517 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
518 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
519 					    (void **)&adev->wb.wb);
520 		if (r) {
521 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
522 			return r;
523 		}
524 
525 		adev->wb.num_wb = AMDGPU_MAX_WB;
526 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
527 
528 		/* clear wb memory */
529 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
530 	}
531 
532 	return 0;
533 }
534 
535 /**
536  * amdgpu_wb_get - Allocate a wb entry
537  *
538  * @adev: amdgpu_device pointer
539  * @wb: wb index
540  *
541  * Allocate a wb slot for use by the driver (all asics).
542  * Returns 0 on success or -EINVAL on failure.
543  */
544 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
545 {
546 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
547 
548 	if (offset < adev->wb.num_wb) {
549 		__set_bit(offset, adev->wb.used);
550 		*wb = offset << 3; /* convert to dw offset */
551 		return 0;
552 	} else {
553 		return -EINVAL;
554 	}
555 }
556 
557 /**
558  * amdgpu_wb_free - Free a wb entry
559  *
560  * @adev: amdgpu_device pointer
561  * @wb: wb index
562  *
563  * Free a wb slot allocated for use by the driver (all asics)
564  */
565 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
566 {
567 	if (wb < adev->wb.num_wb)
568 		__clear_bit(wb >> 3, adev->wb.used);
569 }
570 
571 /**
572  * amdgpu_vram_location - try to find VRAM location
573  * @adev: amdgpu device structure holding all necessary informations
574  * @mc: memory controller structure holding memory informations
575  * @base: base address at which to put VRAM
576  *
577  * Function will try to place VRAM at base address provided
578  * as parameter (which is so far either PCI aperture address or
579  * for IGP TOM base address).
580  *
581  * If there is not enough space to fit the unvisible VRAM in the 32bits
582  * address space then we limit the VRAM size to the aperture.
583  *
584  * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
585  * this shouldn't be a problem as we are using the PCI aperture as a reference.
586  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
587  * not IGP.
588  *
589  * Note: we use mc_vram_size as on some board we need to program the mc to
590  * cover the whole aperture even if VRAM size is inferior to aperture size
591  * Novell bug 204882 + along with lots of ubuntu ones
592  *
593  * Note: when limiting vram it's safe to overwritte real_vram_size because
594  * we are not in case where real_vram_size is inferior to mc_vram_size (ie
595  * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
596  * ones)
597  *
598  * Note: IGP TOM addr should be the same as the aperture addr, we don't
599  * explicitly check for that though.
600  *
601  * FIXME: when reducing VRAM size align new size on power of 2.
602  */
603 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
604 {
605 	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
606 
607 	mc->vram_start = base;
608 	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
609 		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
610 		mc->real_vram_size = mc->aper_size;
611 		mc->mc_vram_size = mc->aper_size;
612 	}
613 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
614 	if (limit && limit < mc->real_vram_size)
615 		mc->real_vram_size = limit;
616 	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
617 			mc->mc_vram_size >> 20, mc->vram_start,
618 			mc->vram_end, mc->real_vram_size >> 20);
619 }
620 
621 /**
622  * amdgpu_gart_location - try to find GTT location
623  * @adev: amdgpu device structure holding all necessary informations
624  * @mc: memory controller structure holding memory informations
625  *
626  * Function will place try to place GTT before or after VRAM.
627  *
628  * If GTT size is bigger than space left then we ajust GTT size.
629  * Thus function will never fails.
630  *
631  * FIXME: when reducing GTT size align new size on power of 2.
632  */
633 void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
634 {
635 	u64 size_af, size_bf;
636 
637 	size_af = adev->mc.mc_mask - mc->vram_end;
638 	size_bf = mc->vram_start;
639 	if (size_bf > size_af) {
640 		if (mc->gart_size > size_bf) {
641 			dev_warn(adev->dev, "limiting GTT\n");
642 			mc->gart_size = size_bf;
643 		}
644 		mc->gart_start = 0;
645 	} else {
646 		if (mc->gart_size > size_af) {
647 			dev_warn(adev->dev, "limiting GTT\n");
648 			mc->gart_size = size_af;
649 		}
650 		mc->gart_start = mc->vram_end + 1;
651 	}
652 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
653 	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
654 			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
655 }
656 
657 /*
658  * Firmware Reservation functions
659  */
660 /**
661  * amdgpu_fw_reserve_vram_fini - free fw reserved vram
662  *
663  * @adev: amdgpu_device pointer
664  *
665  * free fw reserved vram if it has been reserved.
666  */
667 void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
668 {
669 	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
670 		NULL, &adev->fw_vram_usage.va);
671 }
672 
673 /**
674  * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
675  *
676  * @adev: amdgpu_device pointer
677  *
678  * create bo vram reservation from fw.
679  */
680 int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
681 {
682 	int r = 0;
683 	u64 gpu_addr;
684 	u64 vram_size = adev->mc.visible_vram_size;
685 
686 	adev->fw_vram_usage.va = NULL;
687 	adev->fw_vram_usage.reserved_bo = NULL;
688 
689 	if (adev->fw_vram_usage.size > 0 &&
690 		adev->fw_vram_usage.size <= vram_size) {
691 
692 		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
693 			PAGE_SIZE, true, 0,
694 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
695 			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
696 			&adev->fw_vram_usage.reserved_bo);
697 		if (r)
698 			goto error_create;
699 
700 		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
701 		if (r)
702 			goto error_reserve;
703 		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
704 			AMDGPU_GEM_DOMAIN_VRAM,
705 			adev->fw_vram_usage.start_offset,
706 			(adev->fw_vram_usage.start_offset +
707 			adev->fw_vram_usage.size), &gpu_addr);
708 		if (r)
709 			goto error_pin;
710 		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
711 			&adev->fw_vram_usage.va);
712 		if (r)
713 			goto error_kmap;
714 
715 		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
716 	}
717 	return r;
718 
719 error_kmap:
720 	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
721 error_pin:
722 	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
723 error_reserve:
724 	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
725 error_create:
726 	adev->fw_vram_usage.va = NULL;
727 	adev->fw_vram_usage.reserved_bo = NULL;
728 	return r;
729 }
730 
731 
732 /*
733  * GPU helpers function.
734  */
735 /**
736  * amdgpu_need_post - check if the hw need post or not
737  *
738  * @adev: amdgpu_device pointer
739  *
740  * Check if the asic has been initialized (all asics) at driver startup
741  * or post is needed if  hw reset is performed.
742  * Returns true if need or false if not.
743  */
744 bool amdgpu_need_post(struct amdgpu_device *adev)
745 {
746 	uint32_t reg;
747 
748 	if (amdgpu_sriov_vf(adev))
749 		return false;
750 
751 	if (amdgpu_passthrough(adev)) {
752 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
753 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
754 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
755 		 * vpost executed for smc version below 22.15
756 		 */
757 		if (adev->asic_type == CHIP_FIJI) {
758 			int err;
759 			uint32_t fw_ver;
760 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
761 			/* force vPost if error occured */
762 			if (err)
763 				return true;
764 
765 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
766 			if (fw_ver < 0x00160e00)
767 				return true;
768 		}
769 	}
770 
771 	if (adev->has_hw_reset) {
772 		adev->has_hw_reset = false;
773 		return true;
774 	}
775 
776 	/* bios scratch used on CIK+ */
777 	if (adev->asic_type >= CHIP_BONAIRE)
778 		return amdgpu_atombios_scratch_need_asic_init(adev);
779 
780 	/* check MEM_SIZE for older asics */
781 	reg = amdgpu_asic_get_config_memsize(adev);
782 
783 	if ((reg != 0) && (reg != 0xffffffff))
784 		return false;
785 
786 	return true;
787 }
788 
789 /**
790  * amdgpu_dummy_page_init - init dummy page used by the driver
791  *
792  * @adev: amdgpu_device pointer
793  *
794  * Allocate the dummy page used by the driver (all asics).
795  * This dummy page is used by the driver as a filler for gart entries
796  * when pages are taken out of the GART
797  * Returns 0 on sucess, -ENOMEM on failure.
798  */
799 int amdgpu_dummy_page_init(struct amdgpu_device *adev)
800 {
801 	if (adev->dummy_page.page)
802 		return 0;
803 	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
804 	if (adev->dummy_page.page == NULL)
805 		return -ENOMEM;
806 	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
807 					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
808 	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
809 		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
810 		__free_page(adev->dummy_page.page);
811 		adev->dummy_page.page = NULL;
812 		return -ENOMEM;
813 	}
814 	return 0;
815 }
816 
817 /**
818  * amdgpu_dummy_page_fini - free dummy page used by the driver
819  *
820  * @adev: amdgpu_device pointer
821  *
822  * Frees the dummy page used by the driver (all asics).
823  */
824 void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
825 {
826 	if (adev->dummy_page.page == NULL)
827 		return;
828 	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
829 			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
830 	__free_page(adev->dummy_page.page);
831 	adev->dummy_page.page = NULL;
832 }
833 
834 
835 /* ATOM accessor methods */
836 /*
837  * ATOM is an interpreted byte code stored in tables in the vbios.  The
838  * driver registers callbacks to access registers and the interpreter
839  * in the driver parses the tables and executes then to program specific
840  * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
841  * atombios.h, and atom.c
842  */
843 
844 /**
845  * cail_pll_read - read PLL register
846  *
847  * @info: atom card_info pointer
848  * @reg: PLL register offset
849  *
850  * Provides a PLL register accessor for the atom interpreter (r4xx+).
851  * Returns the value of the PLL register.
852  */
853 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
854 {
855 	return 0;
856 }
857 
858 /**
859  * cail_pll_write - write PLL register
860  *
861  * @info: atom card_info pointer
862  * @reg: PLL register offset
863  * @val: value to write to the pll register
864  *
865  * Provides a PLL register accessor for the atom interpreter (r4xx+).
866  */
867 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
868 {
869 
870 }
871 
872 /**
873  * cail_mc_read - read MC (Memory Controller) register
874  *
875  * @info: atom card_info pointer
876  * @reg: MC register offset
877  *
878  * Provides an MC register accessor for the atom interpreter (r4xx+).
879  * Returns the value of the MC register.
880  */
881 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
882 {
883 	return 0;
884 }
885 
886 /**
887  * cail_mc_write - write MC (Memory Controller) register
888  *
889  * @info: atom card_info pointer
890  * @reg: MC register offset
891  * @val: value to write to the pll register
892  *
893  * Provides a MC register accessor for the atom interpreter (r4xx+).
894  */
895 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
896 {
897 
898 }
899 
900 /**
901  * cail_reg_write - write MMIO register
902  *
903  * @info: atom card_info pointer
904  * @reg: MMIO register offset
905  * @val: value to write to the pll register
906  *
907  * Provides a MMIO register accessor for the atom interpreter (r4xx+).
908  */
909 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
910 {
911 	struct amdgpu_device *adev = info->dev->dev_private;
912 
913 	WREG32(reg, val);
914 }
915 
916 /**
917  * cail_reg_read - read MMIO register
918  *
919  * @info: atom card_info pointer
920  * @reg: MMIO register offset
921  *
922  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
923  * Returns the value of the MMIO register.
924  */
925 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
926 {
927 	struct amdgpu_device *adev = info->dev->dev_private;
928 	uint32_t r;
929 
930 	r = RREG32(reg);
931 	return r;
932 }
933 
934 /**
935  * cail_ioreg_write - write IO register
936  *
937  * @info: atom card_info pointer
938  * @reg: IO register offset
939  * @val: value to write to the pll register
940  *
941  * Provides a IO register accessor for the atom interpreter (r4xx+).
942  */
943 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
944 {
945 	struct amdgpu_device *adev = info->dev->dev_private;
946 
947 	WREG32_IO(reg, val);
948 }
949 
950 /**
951  * cail_ioreg_read - read IO register
952  *
953  * @info: atom card_info pointer
954  * @reg: IO register offset
955  *
956  * Provides an IO register accessor for the atom interpreter (r4xx+).
957  * Returns the value of the IO register.
958  */
959 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
960 {
961 	struct amdgpu_device *adev = info->dev->dev_private;
962 	uint32_t r;
963 
964 	r = RREG32_IO(reg);
965 	return r;
966 }
967 
968 static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
969 						 struct device_attribute *attr,
970 						 char *buf)
971 {
972 	struct drm_device *ddev = dev_get_drvdata(dev);
973 	struct amdgpu_device *adev = ddev->dev_private;
974 	struct atom_context *ctx = adev->mode_info.atom_context;
975 
976 	return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
977 }
978 
979 static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
980 		   NULL);
981 
982 /**
983  * amdgpu_atombios_fini - free the driver info and callbacks for atombios
984  *
985  * @adev: amdgpu_device pointer
986  *
987  * Frees the driver info and register access callbacks for the ATOM
988  * interpreter (r4xx+).
989  * Called at driver shutdown.
990  */
991 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
992 {
993 	if (adev->mode_info.atom_context) {
994 		kfree(adev->mode_info.atom_context->scratch);
995 		kfree(adev->mode_info.atom_context->iio);
996 	}
997 	kfree(adev->mode_info.atom_context);
998 	adev->mode_info.atom_context = NULL;
999 	kfree(adev->mode_info.atom_card_info);
1000 	adev->mode_info.atom_card_info = NULL;
1001 	device_remove_file(adev->dev, &dev_attr_vbios_version);
1002 }
1003 
1004 /**
1005  * amdgpu_atombios_init - init the driver info and callbacks for atombios
1006  *
1007  * @adev: amdgpu_device pointer
1008  *
1009  * Initializes the driver info and register access callbacks for the
1010  * ATOM interpreter (r4xx+).
1011  * Returns 0 on sucess, -ENOMEM on failure.
1012  * Called at driver startup.
1013  */
1014 static int amdgpu_atombios_init(struct amdgpu_device *adev)
1015 {
1016 	struct card_info *atom_card_info =
1017 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
1018 	int ret;
1019 
1020 	if (!atom_card_info)
1021 		return -ENOMEM;
1022 
1023 	adev->mode_info.atom_card_info = atom_card_info;
1024 	atom_card_info->dev = adev->ddev;
1025 	atom_card_info->reg_read = cail_reg_read;
1026 	atom_card_info->reg_write = cail_reg_write;
1027 	/* needed for iio ops */
1028 	if (adev->rio_mem) {
1029 		atom_card_info->ioreg_read = cail_ioreg_read;
1030 		atom_card_info->ioreg_write = cail_ioreg_write;
1031 	} else {
1032 		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
1033 		atom_card_info->ioreg_read = cail_reg_read;
1034 		atom_card_info->ioreg_write = cail_reg_write;
1035 	}
1036 	atom_card_info->mc_read = cail_mc_read;
1037 	atom_card_info->mc_write = cail_mc_write;
1038 	atom_card_info->pll_read = cail_pll_read;
1039 	atom_card_info->pll_write = cail_pll_write;
1040 
1041 	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
1042 	if (!adev->mode_info.atom_context) {
1043 		amdgpu_atombios_fini(adev);
1044 		return -ENOMEM;
1045 	}
1046 
1047 	mutex_init(&adev->mode_info.atom_context->mutex);
1048 	if (adev->is_atom_fw) {
1049 		amdgpu_atomfirmware_scratch_regs_init(adev);
1050 		amdgpu_atomfirmware_allocate_fb_scratch(adev);
1051 	} else {
1052 		amdgpu_atombios_scratch_regs_init(adev);
1053 		amdgpu_atombios_allocate_fb_scratch(adev);
1054 	}
1055 
1056 	ret = device_create_file(adev->dev, &dev_attr_vbios_version);
1057 	if (ret) {
1058 		DRM_ERROR("Failed to create device file for VBIOS version\n");
1059 		return ret;
1060 	}
1061 
1062 	return 0;
1063 }
1064 
1065 /* if we get transitioned to only one device, take VGA back */
1066 /**
1067  * amdgpu_vga_set_decode - enable/disable vga decode
1068  *
1069  * @cookie: amdgpu_device pointer
1070  * @state: enable/disable vga decode
1071  *
1072  * Enable/disable vga decode (all asics).
1073  * Returns VGA resource flags.
1074  */
1075 static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1076 {
1077 	struct amdgpu_device *adev = cookie;
1078 	amdgpu_asic_set_vga_state(adev, state);
1079 	if (state)
1080 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1081 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1082 	else
1083 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1084 }
1085 
1086 static void amdgpu_check_block_size(struct amdgpu_device *adev)
1087 {
1088 	/* defines number of bits in page table versus page directory,
1089 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1090 	 * page table and the remaining bits are in the page directory */
1091 	if (amdgpu_vm_block_size == -1)
1092 		return;
1093 
1094 	if (amdgpu_vm_block_size < 9) {
1095 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1096 			 amdgpu_vm_block_size);
1097 		goto def_value;
1098 	}
1099 
1100 	if (amdgpu_vm_block_size > 24 ||
1101 	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1102 		dev_warn(adev->dev, "VM page table size (%d) too large\n",
1103 			 amdgpu_vm_block_size);
1104 		goto def_value;
1105 	}
1106 
1107 	return;
1108 
1109 def_value:
1110 	amdgpu_vm_block_size = -1;
1111 }
1112 
1113 static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1114 {
1115 	/* no need to check the default value */
1116 	if (amdgpu_vm_size == -1)
1117 		return;
1118 
1119 	if (!is_power_of_2(amdgpu_vm_size)) {
1120 		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1121 			 amdgpu_vm_size);
1122 		goto def_value;
1123 	}
1124 
1125 	if (amdgpu_vm_size < 1) {
1126 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1127 			 amdgpu_vm_size);
1128 		goto def_value;
1129 	}
1130 
1131 	/*
1132 	 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
1133 	 */
1134 	if (amdgpu_vm_size > 1024) {
1135 		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1136 			 amdgpu_vm_size);
1137 		goto def_value;
1138 	}
1139 
1140 	return;
1141 
1142 def_value:
1143 	amdgpu_vm_size = -1;
1144 }
1145 
1146 /**
1147  * amdgpu_check_arguments - validate module params
1148  *
1149  * @adev: amdgpu_device pointer
1150  *
1151  * Validates certain module parameters and updates
1152  * the associated values used by the driver (all asics).
1153  */
1154 static void amdgpu_check_arguments(struct amdgpu_device *adev)
1155 {
1156 	if (amdgpu_sched_jobs < 4) {
1157 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1158 			 amdgpu_sched_jobs);
1159 		amdgpu_sched_jobs = 4;
1160 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1161 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1162 			 amdgpu_sched_jobs);
1163 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1164 	}
1165 
1166 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1167 		/* gart size must be greater or equal to 32M */
1168 		dev_warn(adev->dev, "gart size (%d) too small\n",
1169 			 amdgpu_gart_size);
1170 		amdgpu_gart_size = -1;
1171 	}
1172 
1173 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1174 		/* gtt size must be greater or equal to 32M */
1175 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1176 				 amdgpu_gtt_size);
1177 		amdgpu_gtt_size = -1;
1178 	}
1179 
1180 	/* valid range is between 4 and 9 inclusive */
1181 	if (amdgpu_vm_fragment_size != -1 &&
1182 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1183 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1184 		amdgpu_vm_fragment_size = -1;
1185 	}
1186 
1187 	amdgpu_check_vm_size(adev);
1188 
1189 	amdgpu_check_block_size(adev);
1190 
1191 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1192 	    !is_power_of_2(amdgpu_vram_page_split))) {
1193 		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1194 			 amdgpu_vram_page_split);
1195 		amdgpu_vram_page_split = 1024;
1196 	}
1197 }
1198 
1199 /**
1200  * amdgpu_switcheroo_set_state - set switcheroo state
1201  *
1202  * @pdev: pci dev pointer
1203  * @state: vga_switcheroo state
1204  *
1205  * Callback for the switcheroo driver.  Suspends or resumes the
1206  * the asics before or after it is powered up using ACPI methods.
1207  */
1208 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1209 {
1210 	struct drm_device *dev = pci_get_drvdata(pdev);
1211 
1212 	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1213 		return;
1214 
1215 	if (state == VGA_SWITCHEROO_ON) {
1216 		pr_info("amdgpu: switched on\n");
1217 		/* don't suspend or resume card normally */
1218 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1219 
1220 		amdgpu_device_resume(dev, true, true);
1221 
1222 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1223 		drm_kms_helper_poll_enable(dev);
1224 	} else {
1225 		pr_info("amdgpu: switched off\n");
1226 		drm_kms_helper_poll_disable(dev);
1227 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1228 		amdgpu_device_suspend(dev, true, true);
1229 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1230 	}
1231 }
1232 
1233 /**
1234  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1235  *
1236  * @pdev: pci dev pointer
1237  *
1238  * Callback for the switcheroo driver.  Check of the switcheroo
1239  * state can be changed.
1240  * Returns true if the state can be changed, false if not.
1241  */
1242 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1243 {
1244 	struct drm_device *dev = pci_get_drvdata(pdev);
1245 
1246 	/*
1247 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1248 	* locking inversion with the driver load path. And the access here is
1249 	* completely racy anyway. So don't bother with locking for now.
1250 	*/
1251 	return dev->open_count == 0;
1252 }
1253 
1254 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1255 	.set_gpu_state = amdgpu_switcheroo_set_state,
1256 	.reprobe = NULL,
1257 	.can_switch = amdgpu_switcheroo_can_switch,
1258 };
1259 
1260 int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1261 				  enum amd_ip_block_type block_type,
1262 				  enum amd_clockgating_state state)
1263 {
1264 	int i, r = 0;
1265 
1266 	for (i = 0; i < adev->num_ip_blocks; i++) {
1267 		if (!adev->ip_blocks[i].status.valid)
1268 			continue;
1269 		if (adev->ip_blocks[i].version->type != block_type)
1270 			continue;
1271 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1272 			continue;
1273 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1274 			(void *)adev, state);
1275 		if (r)
1276 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1277 				  adev->ip_blocks[i].version->funcs->name, r);
1278 	}
1279 	return r;
1280 }
1281 
1282 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1283 				  enum amd_ip_block_type block_type,
1284 				  enum amd_powergating_state state)
1285 {
1286 	int i, r = 0;
1287 
1288 	for (i = 0; i < adev->num_ip_blocks; i++) {
1289 		if (!adev->ip_blocks[i].status.valid)
1290 			continue;
1291 		if (adev->ip_blocks[i].version->type != block_type)
1292 			continue;
1293 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1294 			continue;
1295 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1296 			(void *)adev, state);
1297 		if (r)
1298 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1299 				  adev->ip_blocks[i].version->funcs->name, r);
1300 	}
1301 	return r;
1302 }
1303 
1304 void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1305 {
1306 	int i;
1307 
1308 	for (i = 0; i < adev->num_ip_blocks; i++) {
1309 		if (!adev->ip_blocks[i].status.valid)
1310 			continue;
1311 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1312 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1313 	}
1314 }
1315 
1316 int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1317 			 enum amd_ip_block_type block_type)
1318 {
1319 	int i, r;
1320 
1321 	for (i = 0; i < adev->num_ip_blocks; i++) {
1322 		if (!adev->ip_blocks[i].status.valid)
1323 			continue;
1324 		if (adev->ip_blocks[i].version->type == block_type) {
1325 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1326 			if (r)
1327 				return r;
1328 			break;
1329 		}
1330 	}
1331 	return 0;
1332 
1333 }
1334 
1335 bool amdgpu_is_idle(struct amdgpu_device *adev,
1336 		    enum amd_ip_block_type block_type)
1337 {
1338 	int i;
1339 
1340 	for (i = 0; i < adev->num_ip_blocks; i++) {
1341 		if (!adev->ip_blocks[i].status.valid)
1342 			continue;
1343 		if (adev->ip_blocks[i].version->type == block_type)
1344 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1345 	}
1346 	return true;
1347 
1348 }
1349 
1350 struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1351 					     enum amd_ip_block_type type)
1352 {
1353 	int i;
1354 
1355 	for (i = 0; i < adev->num_ip_blocks; i++)
1356 		if (adev->ip_blocks[i].version->type == type)
1357 			return &adev->ip_blocks[i];
1358 
1359 	return NULL;
1360 }
1361 
1362 /**
1363  * amdgpu_ip_block_version_cmp
1364  *
1365  * @adev: amdgpu_device pointer
1366  * @type: enum amd_ip_block_type
1367  * @major: major version
1368  * @minor: minor version
1369  *
1370  * return 0 if equal or greater
1371  * return 1 if smaller or the ip_block doesn't exist
1372  */
1373 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1374 				enum amd_ip_block_type type,
1375 				u32 major, u32 minor)
1376 {
1377 	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
1378 
1379 	if (ip_block && ((ip_block->version->major > major) ||
1380 			((ip_block->version->major == major) &&
1381 			(ip_block->version->minor >= minor))))
1382 		return 0;
1383 
1384 	return 1;
1385 }
1386 
1387 /**
1388  * amdgpu_ip_block_add
1389  *
1390  * @adev: amdgpu_device pointer
1391  * @ip_block_version: pointer to the IP to add
1392  *
1393  * Adds the IP block driver information to the collection of IPs
1394  * on the asic.
1395  */
1396 int amdgpu_ip_block_add(struct amdgpu_device *adev,
1397 			const struct amdgpu_ip_block_version *ip_block_version)
1398 {
1399 	if (!ip_block_version)
1400 		return -EINVAL;
1401 
1402 	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1403 		  ip_block_version->funcs->name);
1404 
1405 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1406 
1407 	return 0;
1408 }
1409 
1410 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1411 {
1412 	adev->enable_virtual_display = false;
1413 
1414 	if (amdgpu_virtual_display) {
1415 		struct drm_device *ddev = adev->ddev;
1416 		const char *pci_address_name = pci_name(ddev->pdev);
1417 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1418 
1419 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1420 		pciaddstr_tmp = pciaddstr;
1421 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1422 			pciaddname = strsep(&pciaddname_tmp, ",");
1423 			if (!strcmp("all", pciaddname)
1424 			    || !strcmp(pci_address_name, pciaddname)) {
1425 				long num_crtc;
1426 				int res = -1;
1427 
1428 				adev->enable_virtual_display = true;
1429 
1430 				if (pciaddname_tmp)
1431 					res = kstrtol(pciaddname_tmp, 10,
1432 						      &num_crtc);
1433 
1434 				if (!res) {
1435 					if (num_crtc < 1)
1436 						num_crtc = 1;
1437 					if (num_crtc > 6)
1438 						num_crtc = 6;
1439 					adev->mode_info.num_crtc = num_crtc;
1440 				} else {
1441 					adev->mode_info.num_crtc = 1;
1442 				}
1443 				break;
1444 			}
1445 		}
1446 
1447 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1448 			 amdgpu_virtual_display, pci_address_name,
1449 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1450 
1451 		kfree(pciaddstr);
1452 	}
1453 }
1454 
1455 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1456 {
1457 	const char *chip_name;
1458 	char fw_name[30];
1459 	int err;
1460 	const struct gpu_info_firmware_header_v1_0 *hdr;
1461 
1462 	adev->firmware.gpu_info_fw = NULL;
1463 
1464 	switch (adev->asic_type) {
1465 	case CHIP_TOPAZ:
1466 	case CHIP_TONGA:
1467 	case CHIP_FIJI:
1468 	case CHIP_POLARIS11:
1469 	case CHIP_POLARIS10:
1470 	case CHIP_POLARIS12:
1471 	case CHIP_CARRIZO:
1472 	case CHIP_STONEY:
1473 #ifdef CONFIG_DRM_AMDGPU_SI
1474 	case CHIP_VERDE:
1475 	case CHIP_TAHITI:
1476 	case CHIP_PITCAIRN:
1477 	case CHIP_OLAND:
1478 	case CHIP_HAINAN:
1479 #endif
1480 #ifdef CONFIG_DRM_AMDGPU_CIK
1481 	case CHIP_BONAIRE:
1482 	case CHIP_HAWAII:
1483 	case CHIP_KAVERI:
1484 	case CHIP_KABINI:
1485 	case CHIP_MULLINS:
1486 #endif
1487 	default:
1488 		return 0;
1489 	case CHIP_VEGA10:
1490 		chip_name = "vega10";
1491 		break;
1492 	case CHIP_RAVEN:
1493 		chip_name = "raven";
1494 		break;
1495 	}
1496 
1497 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1498 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1499 	if (err) {
1500 		dev_err(adev->dev,
1501 			"Failed to load gpu_info firmware \"%s\"\n",
1502 			fw_name);
1503 		goto out;
1504 	}
1505 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1506 	if (err) {
1507 		dev_err(adev->dev,
1508 			"Failed to validate gpu_info firmware \"%s\"\n",
1509 			fw_name);
1510 		goto out;
1511 	}
1512 
1513 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1514 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1515 
1516 	switch (hdr->version_major) {
1517 	case 1:
1518 	{
1519 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1520 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1521 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1522 
1523 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1524 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1525 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1526 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1527 		adev->gfx.config.max_texture_channel_caches =
1528 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1529 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1530 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1531 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1532 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1533 		adev->gfx.config.double_offchip_lds_buf =
1534 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1535 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1536 		adev->gfx.cu_info.max_waves_per_simd =
1537 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1538 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1539 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1540 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1541 		break;
1542 	}
1543 	default:
1544 		dev_err(adev->dev,
1545 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1546 		err = -EINVAL;
1547 		goto out;
1548 	}
1549 out:
1550 	return err;
1551 }
1552 
1553 static int amdgpu_early_init(struct amdgpu_device *adev)
1554 {
1555 	int i, r;
1556 
1557 	amdgpu_device_enable_virtual_display(adev);
1558 
1559 	switch (adev->asic_type) {
1560 	case CHIP_TOPAZ:
1561 	case CHIP_TONGA:
1562 	case CHIP_FIJI:
1563 	case CHIP_POLARIS11:
1564 	case CHIP_POLARIS10:
1565 	case CHIP_POLARIS12:
1566 	case CHIP_CARRIZO:
1567 	case CHIP_STONEY:
1568 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1569 			adev->family = AMDGPU_FAMILY_CZ;
1570 		else
1571 			adev->family = AMDGPU_FAMILY_VI;
1572 
1573 		r = vi_set_ip_blocks(adev);
1574 		if (r)
1575 			return r;
1576 		break;
1577 #ifdef CONFIG_DRM_AMDGPU_SI
1578 	case CHIP_VERDE:
1579 	case CHIP_TAHITI:
1580 	case CHIP_PITCAIRN:
1581 	case CHIP_OLAND:
1582 	case CHIP_HAINAN:
1583 		adev->family = AMDGPU_FAMILY_SI;
1584 		r = si_set_ip_blocks(adev);
1585 		if (r)
1586 			return r;
1587 		break;
1588 #endif
1589 #ifdef CONFIG_DRM_AMDGPU_CIK
1590 	case CHIP_BONAIRE:
1591 	case CHIP_HAWAII:
1592 	case CHIP_KAVERI:
1593 	case CHIP_KABINI:
1594 	case CHIP_MULLINS:
1595 		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1596 			adev->family = AMDGPU_FAMILY_CI;
1597 		else
1598 			adev->family = AMDGPU_FAMILY_KV;
1599 
1600 		r = cik_set_ip_blocks(adev);
1601 		if (r)
1602 			return r;
1603 		break;
1604 #endif
1605 	case  CHIP_VEGA10:
1606 	case  CHIP_RAVEN:
1607 		if (adev->asic_type == CHIP_RAVEN)
1608 			adev->family = AMDGPU_FAMILY_RV;
1609 		else
1610 			adev->family = AMDGPU_FAMILY_AI;
1611 
1612 		r = soc15_set_ip_blocks(adev);
1613 		if (r)
1614 			return r;
1615 		break;
1616 	default:
1617 		/* FIXME: not supported yet */
1618 		return -EINVAL;
1619 	}
1620 
1621 	r = amdgpu_device_parse_gpu_info_fw(adev);
1622 	if (r)
1623 		return r;
1624 
1625 	if (amdgpu_sriov_vf(adev)) {
1626 		r = amdgpu_virt_request_full_gpu(adev, true);
1627 		if (r)
1628 			return r;
1629 	}
1630 
1631 	for (i = 0; i < adev->num_ip_blocks; i++) {
1632 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1633 			DRM_ERROR("disabled ip block: %d <%s>\n",
1634 				  i, adev->ip_blocks[i].version->funcs->name);
1635 			adev->ip_blocks[i].status.valid = false;
1636 		} else {
1637 			if (adev->ip_blocks[i].version->funcs->early_init) {
1638 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1639 				if (r == -ENOENT) {
1640 					adev->ip_blocks[i].status.valid = false;
1641 				} else if (r) {
1642 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
1643 						  adev->ip_blocks[i].version->funcs->name, r);
1644 					return r;
1645 				} else {
1646 					adev->ip_blocks[i].status.valid = true;
1647 				}
1648 			} else {
1649 				adev->ip_blocks[i].status.valid = true;
1650 			}
1651 		}
1652 	}
1653 
1654 	adev->cg_flags &= amdgpu_cg_mask;
1655 	adev->pg_flags &= amdgpu_pg_mask;
1656 
1657 	return 0;
1658 }
1659 
1660 static int amdgpu_init(struct amdgpu_device *adev)
1661 {
1662 	int i, r;
1663 
1664 	for (i = 0; i < adev->num_ip_blocks; i++) {
1665 		if (!adev->ip_blocks[i].status.valid)
1666 			continue;
1667 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1668 		if (r) {
1669 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1670 				  adev->ip_blocks[i].version->funcs->name, r);
1671 			return r;
1672 		}
1673 		adev->ip_blocks[i].status.sw = true;
1674 		/* need to do gmc hw init early so we can allocate gpu mem */
1675 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1676 			r = amdgpu_vram_scratch_init(adev);
1677 			if (r) {
1678 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1679 				return r;
1680 			}
1681 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1682 			if (r) {
1683 				DRM_ERROR("hw_init %d failed %d\n", i, r);
1684 				return r;
1685 			}
1686 			r = amdgpu_wb_init(adev);
1687 			if (r) {
1688 				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
1689 				return r;
1690 			}
1691 			adev->ip_blocks[i].status.hw = true;
1692 
1693 			/* right after GMC hw init, we create CSA */
1694 			if (amdgpu_sriov_vf(adev)) {
1695 				r = amdgpu_allocate_static_csa(adev);
1696 				if (r) {
1697 					DRM_ERROR("allocate CSA failed %d\n", r);
1698 					return r;
1699 				}
1700 			}
1701 		}
1702 	}
1703 
1704 	for (i = 0; i < adev->num_ip_blocks; i++) {
1705 		if (!adev->ip_blocks[i].status.sw)
1706 			continue;
1707 		/* gmc hw init is done early */
1708 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1709 			continue;
1710 		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1711 		if (r) {
1712 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1713 				  adev->ip_blocks[i].version->funcs->name, r);
1714 			return r;
1715 		}
1716 		adev->ip_blocks[i].status.hw = true;
1717 	}
1718 
1719 	return 0;
1720 }
1721 
1722 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1723 {
1724 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1725 }
1726 
1727 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1728 {
1729 	return !!memcmp(adev->gart.ptr, adev->reset_magic,
1730 			AMDGPU_RESET_MAGIC_NUM);
1731 }
1732 
1733 static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1734 {
1735 	int i = 0, r;
1736 
1737 	for (i = 0; i < adev->num_ip_blocks; i++) {
1738 		if (!adev->ip_blocks[i].status.valid)
1739 			continue;
1740 		/* skip CG for VCE/UVD, it's handled specially */
1741 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1742 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1743 			/* enable clockgating to save power */
1744 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1745 										     AMD_CG_STATE_GATE);
1746 			if (r) {
1747 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1748 					  adev->ip_blocks[i].version->funcs->name, r);
1749 				return r;
1750 			}
1751 		}
1752 	}
1753 	return 0;
1754 }
1755 
1756 static int amdgpu_late_init(struct amdgpu_device *adev)
1757 {
1758 	int i = 0, r;
1759 
1760 	for (i = 0; i < adev->num_ip_blocks; i++) {
1761 		if (!adev->ip_blocks[i].status.valid)
1762 			continue;
1763 		if (adev->ip_blocks[i].version->funcs->late_init) {
1764 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1765 			if (r) {
1766 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
1767 					  adev->ip_blocks[i].version->funcs->name, r);
1768 				return r;
1769 			}
1770 			adev->ip_blocks[i].status.late_initialized = true;
1771 		}
1772 	}
1773 
1774 	mod_delayed_work(system_wq, &adev->late_init_work,
1775 			msecs_to_jiffies(AMDGPU_RESUME_MS));
1776 
1777 	amdgpu_fill_reset_magic(adev);
1778 
1779 	return 0;
1780 }
1781 
1782 static int amdgpu_fini(struct amdgpu_device *adev)
1783 {
1784 	int i, r;
1785 
1786 	/* need to disable SMC first */
1787 	for (i = 0; i < adev->num_ip_blocks; i++) {
1788 		if (!adev->ip_blocks[i].status.hw)
1789 			continue;
1790 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
1791 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1792 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1793 										     AMD_CG_STATE_UNGATE);
1794 			if (r) {
1795 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1796 					  adev->ip_blocks[i].version->funcs->name, r);
1797 				return r;
1798 			}
1799 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1800 			/* XXX handle errors */
1801 			if (r) {
1802 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1803 					  adev->ip_blocks[i].version->funcs->name, r);
1804 			}
1805 			adev->ip_blocks[i].status.hw = false;
1806 			break;
1807 		}
1808 	}
1809 
1810 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1811 		if (!adev->ip_blocks[i].status.hw)
1812 			continue;
1813 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1814 			amdgpu_wb_fini(adev);
1815 			amdgpu_vram_scratch_fini(adev);
1816 		}
1817 
1818 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1819 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1820 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
1821 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1822 										     AMD_CG_STATE_UNGATE);
1823 			if (r) {
1824 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1825 					  adev->ip_blocks[i].version->funcs->name, r);
1826 				return r;
1827 			}
1828 		}
1829 
1830 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1831 		/* XXX handle errors */
1832 		if (r) {
1833 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1834 				  adev->ip_blocks[i].version->funcs->name, r);
1835 		}
1836 
1837 		adev->ip_blocks[i].status.hw = false;
1838 	}
1839 
1840 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1841 		if (!adev->ip_blocks[i].status.sw)
1842 			continue;
1843 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1844 		/* XXX handle errors */
1845 		if (r) {
1846 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1847 				  adev->ip_blocks[i].version->funcs->name, r);
1848 		}
1849 		adev->ip_blocks[i].status.sw = false;
1850 		adev->ip_blocks[i].status.valid = false;
1851 	}
1852 
1853 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1854 		if (!adev->ip_blocks[i].status.late_initialized)
1855 			continue;
1856 		if (adev->ip_blocks[i].version->funcs->late_fini)
1857 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1858 		adev->ip_blocks[i].status.late_initialized = false;
1859 	}
1860 
1861 	if (amdgpu_sriov_vf(adev))
1862 		amdgpu_virt_release_full_gpu(adev, false);
1863 
1864 	return 0;
1865 }
1866 
1867 static void amdgpu_late_init_func_handler(struct work_struct *work)
1868 {
1869 	struct amdgpu_device *adev =
1870 		container_of(work, struct amdgpu_device, late_init_work.work);
1871 	amdgpu_late_set_cg_state(adev);
1872 }
1873 
1874 int amdgpu_suspend(struct amdgpu_device *adev)
1875 {
1876 	int i, r;
1877 
1878 	if (amdgpu_sriov_vf(adev))
1879 		amdgpu_virt_request_full_gpu(adev, false);
1880 
1881 	/* ungate SMC block first */
1882 	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1883 					 AMD_CG_STATE_UNGATE);
1884 	if (r) {
1885 		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1886 	}
1887 
1888 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1889 		if (!adev->ip_blocks[i].status.valid)
1890 			continue;
1891 		/* ungate blocks so that suspend can properly shut them down */
1892 		if (i != AMD_IP_BLOCK_TYPE_SMC) {
1893 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1894 										     AMD_CG_STATE_UNGATE);
1895 			if (r) {
1896 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1897 					  adev->ip_blocks[i].version->funcs->name, r);
1898 			}
1899 		}
1900 		/* XXX handle errors */
1901 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
1902 		/* XXX handle errors */
1903 		if (r) {
1904 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
1905 				  adev->ip_blocks[i].version->funcs->name, r);
1906 		}
1907 	}
1908 
1909 	if (amdgpu_sriov_vf(adev))
1910 		amdgpu_virt_release_full_gpu(adev, false);
1911 
1912 	return 0;
1913 }
1914 
1915 static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
1916 {
1917 	int i, r;
1918 
1919 	static enum amd_ip_block_type ip_order[] = {
1920 		AMD_IP_BLOCK_TYPE_GMC,
1921 		AMD_IP_BLOCK_TYPE_COMMON,
1922 		AMD_IP_BLOCK_TYPE_IH,
1923 	};
1924 
1925 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1926 		int j;
1927 		struct amdgpu_ip_block *block;
1928 
1929 		for (j = 0; j < adev->num_ip_blocks; j++) {
1930 			block = &adev->ip_blocks[j];
1931 
1932 			if (block->version->type != ip_order[i] ||
1933 				!block->status.valid)
1934 				continue;
1935 
1936 			r = block->version->funcs->hw_init(adev);
1937 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
1938 		}
1939 	}
1940 
1941 	return 0;
1942 }
1943 
1944 static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
1945 {
1946 	int i, r;
1947 
1948 	static enum amd_ip_block_type ip_order[] = {
1949 		AMD_IP_BLOCK_TYPE_SMC,
1950 		AMD_IP_BLOCK_TYPE_PSP,
1951 		AMD_IP_BLOCK_TYPE_DCE,
1952 		AMD_IP_BLOCK_TYPE_GFX,
1953 		AMD_IP_BLOCK_TYPE_SDMA,
1954 		AMD_IP_BLOCK_TYPE_UVD,
1955 		AMD_IP_BLOCK_TYPE_VCE
1956 	};
1957 
1958 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1959 		int j;
1960 		struct amdgpu_ip_block *block;
1961 
1962 		for (j = 0; j < adev->num_ip_blocks; j++) {
1963 			block = &adev->ip_blocks[j];
1964 
1965 			if (block->version->type != ip_order[i] ||
1966 				!block->status.valid)
1967 				continue;
1968 
1969 			r = block->version->funcs->hw_init(adev);
1970 			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
1971 		}
1972 	}
1973 
1974 	return 0;
1975 }
1976 
1977 static int amdgpu_resume_phase1(struct amdgpu_device *adev)
1978 {
1979 	int i, r;
1980 
1981 	for (i = 0; i < adev->num_ip_blocks; i++) {
1982 		if (!adev->ip_blocks[i].status.valid)
1983 			continue;
1984 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1985 				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1986 				adev->ip_blocks[i].version->type ==
1987 				AMD_IP_BLOCK_TYPE_IH) {
1988 			r = adev->ip_blocks[i].version->funcs->resume(adev);
1989 			if (r) {
1990 				DRM_ERROR("resume of IP block <%s> failed %d\n",
1991 					  adev->ip_blocks[i].version->funcs->name, r);
1992 				return r;
1993 			}
1994 		}
1995 	}
1996 
1997 	return 0;
1998 }
1999 
2000 static int amdgpu_resume_phase2(struct amdgpu_device *adev)
2001 {
2002 	int i, r;
2003 
2004 	for (i = 0; i < adev->num_ip_blocks; i++) {
2005 		if (!adev->ip_blocks[i].status.valid)
2006 			continue;
2007 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2008 				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2009 				adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
2010 			continue;
2011 		r = adev->ip_blocks[i].version->funcs->resume(adev);
2012 		if (r) {
2013 			DRM_ERROR("resume of IP block <%s> failed %d\n",
2014 				  adev->ip_blocks[i].version->funcs->name, r);
2015 			return r;
2016 		}
2017 	}
2018 
2019 	return 0;
2020 }
2021 
2022 static int amdgpu_resume(struct amdgpu_device *adev)
2023 {
2024 	int r;
2025 
2026 	r = amdgpu_resume_phase1(adev);
2027 	if (r)
2028 		return r;
2029 	r = amdgpu_resume_phase2(adev);
2030 
2031 	return r;
2032 }
2033 
2034 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
2035 {
2036 	if (amdgpu_sriov_vf(adev)) {
2037 		if (adev->is_atom_fw) {
2038 			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2039 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2040 		} else {
2041 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2042 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2043 		}
2044 
2045 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2046 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
2047 	}
2048 }
2049 
2050 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2051 {
2052 	switch (asic_type) {
2053 #if defined(CONFIG_DRM_AMD_DC)
2054 	case CHIP_BONAIRE:
2055 	case CHIP_HAWAII:
2056 	case CHIP_KAVERI:
2057 	case CHIP_CARRIZO:
2058 	case CHIP_STONEY:
2059 	case CHIP_POLARIS11:
2060 	case CHIP_POLARIS10:
2061 	case CHIP_POLARIS12:
2062 	case CHIP_TONGA:
2063 	case CHIP_FIJI:
2064 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
2065 		return amdgpu_dc != 0;
2066 #endif
2067 	case CHIP_KABINI:
2068 	case CHIP_MULLINS:
2069 		return amdgpu_dc > 0;
2070 	case CHIP_VEGA10:
2071 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2072 	case CHIP_RAVEN:
2073 #endif
2074 		return amdgpu_dc != 0;
2075 #endif
2076 	default:
2077 		return false;
2078 	}
2079 }
2080 
2081 /**
2082  * amdgpu_device_has_dc_support - check if dc is supported
2083  *
2084  * @adev: amdgpu_device_pointer
2085  *
2086  * Returns true for supported, false for not supported
2087  */
2088 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2089 {
2090 	if (amdgpu_sriov_vf(adev))
2091 		return false;
2092 
2093 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
2094 }
2095 
2096 /**
2097  * amdgpu_device_init - initialize the driver
2098  *
2099  * @adev: amdgpu_device pointer
2100  * @pdev: drm dev pointer
2101  * @pdev: pci dev pointer
2102  * @flags: driver flags
2103  *
2104  * Initializes the driver info and hw (all asics).
2105  * Returns 0 for success or an error on failure.
2106  * Called at driver startup.
2107  */
2108 int amdgpu_device_init(struct amdgpu_device *adev,
2109 		       struct drm_device *ddev,
2110 		       struct pci_dev *pdev,
2111 		       uint32_t flags)
2112 {
2113 	int r, i;
2114 	bool runtime = false;
2115 	u32 max_MBps;
2116 
2117 	adev->shutdown = false;
2118 	adev->dev = &pdev->dev;
2119 	adev->ddev = ddev;
2120 	adev->pdev = pdev;
2121 	adev->flags = flags;
2122 	adev->asic_type = flags & AMD_ASIC_MASK;
2123 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2124 	adev->mc.gart_size = 512 * 1024 * 1024;
2125 	adev->accel_working = false;
2126 	adev->num_rings = 0;
2127 	adev->mman.buffer_funcs = NULL;
2128 	adev->mman.buffer_funcs_ring = NULL;
2129 	adev->vm_manager.vm_pte_funcs = NULL;
2130 	adev->vm_manager.vm_pte_num_rings = 0;
2131 	adev->gart.gart_funcs = NULL;
2132 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2133 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2134 
2135 	adev->smc_rreg = &amdgpu_invalid_rreg;
2136 	adev->smc_wreg = &amdgpu_invalid_wreg;
2137 	adev->pcie_rreg = &amdgpu_invalid_rreg;
2138 	adev->pcie_wreg = &amdgpu_invalid_wreg;
2139 	adev->pciep_rreg = &amdgpu_invalid_rreg;
2140 	adev->pciep_wreg = &amdgpu_invalid_wreg;
2141 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2142 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2143 	adev->didt_rreg = &amdgpu_invalid_rreg;
2144 	adev->didt_wreg = &amdgpu_invalid_wreg;
2145 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2146 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2147 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2148 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2149 
2150 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2151 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2152 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2153 
2154 	/* mutex initialization are all done here so we
2155 	 * can recall function without having locking issues */
2156 	atomic_set(&adev->irq.ih.lock, 0);
2157 	mutex_init(&adev->firmware.mutex);
2158 	mutex_init(&adev->pm.mutex);
2159 	mutex_init(&adev->gfx.gpu_clock_mutex);
2160 	mutex_init(&adev->srbm_mutex);
2161 	mutex_init(&adev->gfx.pipe_reserve_mutex);
2162 	mutex_init(&adev->grbm_idx_mutex);
2163 	mutex_init(&adev->mn_lock);
2164 	mutex_init(&adev->virt.vf_errors.lock);
2165 	hash_init(adev->mn_hash);
2166 
2167 	amdgpu_check_arguments(adev);
2168 
2169 	spin_lock_init(&adev->mmio_idx_lock);
2170 	spin_lock_init(&adev->smc_idx_lock);
2171 	spin_lock_init(&adev->pcie_idx_lock);
2172 	spin_lock_init(&adev->uvd_ctx_idx_lock);
2173 	spin_lock_init(&adev->didt_idx_lock);
2174 	spin_lock_init(&adev->gc_cac_idx_lock);
2175 	spin_lock_init(&adev->se_cac_idx_lock);
2176 	spin_lock_init(&adev->audio_endpt_idx_lock);
2177 	spin_lock_init(&adev->mm_stats.lock);
2178 
2179 	INIT_LIST_HEAD(&adev->shadow_list);
2180 	mutex_init(&adev->shadow_list_lock);
2181 
2182 	INIT_LIST_HEAD(&adev->gtt_list);
2183 	spin_lock_init(&adev->gtt_list_lock);
2184 
2185 	INIT_LIST_HEAD(&adev->ring_lru_list);
2186 	spin_lock_init(&adev->ring_lru_list_lock);
2187 
2188 	INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2189 
2190 	/* Registers mapping */
2191 	/* TODO: block userspace mapping of io register */
2192 	if (adev->asic_type >= CHIP_BONAIRE) {
2193 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2194 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2195 	} else {
2196 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2197 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2198 	}
2199 
2200 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2201 	if (adev->rmmio == NULL) {
2202 		return -ENOMEM;
2203 	}
2204 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2205 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2206 
2207 	/* doorbell bar mapping */
2208 	amdgpu_doorbell_init(adev);
2209 
2210 	/* io port mapping */
2211 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2212 		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2213 			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2214 			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2215 			break;
2216 		}
2217 	}
2218 	if (adev->rio_mem == NULL)
2219 		DRM_INFO("PCI I/O BAR is not found.\n");
2220 
2221 	/* early init functions */
2222 	r = amdgpu_early_init(adev);
2223 	if (r)
2224 		return r;
2225 
2226 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2227 	/* this will fail for cards that aren't VGA class devices, just
2228 	 * ignore it */
2229 	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2230 
2231 	if (amdgpu_runtime_pm == 1)
2232 		runtime = true;
2233 	if (amdgpu_device_is_px(ddev))
2234 		runtime = true;
2235 	if (!pci_is_thunderbolt_attached(adev->pdev))
2236 		vga_switcheroo_register_client(adev->pdev,
2237 					       &amdgpu_switcheroo_ops, runtime);
2238 	if (runtime)
2239 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2240 
2241 	/* Read BIOS */
2242 	if (!amdgpu_get_bios(adev)) {
2243 		r = -EINVAL;
2244 		goto failed;
2245 	}
2246 
2247 	r = amdgpu_atombios_init(adev);
2248 	if (r) {
2249 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2250 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2251 		goto failed;
2252 	}
2253 
2254 	/* detect if we are with an SRIOV vbios */
2255 	amdgpu_device_detect_sriov_bios(adev);
2256 
2257 	/* Post card if necessary */
2258 	if (amdgpu_need_post(adev)) {
2259 		if (!adev->bios) {
2260 			dev_err(adev->dev, "no vBIOS found\n");
2261 			r = -EINVAL;
2262 			goto failed;
2263 		}
2264 		DRM_INFO("GPU posting now...\n");
2265 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2266 		if (r) {
2267 			dev_err(adev->dev, "gpu post error!\n");
2268 			goto failed;
2269 		}
2270 	} else {
2271 		DRM_INFO("GPU post is not needed\n");
2272 	}
2273 
2274 	if (adev->is_atom_fw) {
2275 		/* Initialize clocks */
2276 		r = amdgpu_atomfirmware_get_clock_info(adev);
2277 		if (r) {
2278 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
2279 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2280 			goto failed;
2281 		}
2282 	} else {
2283 		/* Initialize clocks */
2284 		r = amdgpu_atombios_get_clock_info(adev);
2285 		if (r) {
2286 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2287 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
2288 			goto failed;
2289 		}
2290 		/* init i2c buses */
2291 		if (!amdgpu_device_has_dc_support(adev))
2292 			amdgpu_atombios_i2c_init(adev);
2293 	}
2294 
2295 	/* Fence driver */
2296 	r = amdgpu_fence_driver_init(adev);
2297 	if (r) {
2298 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2299 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
2300 		goto failed;
2301 	}
2302 
2303 	/* init the mode config */
2304 	drm_mode_config_init(adev->ddev);
2305 
2306 	r = amdgpu_init(adev);
2307 	if (r) {
2308 		dev_err(adev->dev, "amdgpu_init failed\n");
2309 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
2310 		amdgpu_fini(adev);
2311 		goto failed;
2312 	}
2313 
2314 	adev->accel_working = true;
2315 
2316 	amdgpu_vm_check_compute_bug(adev);
2317 
2318 	/* Initialize the buffer migration limit. */
2319 	if (amdgpu_moverate >= 0)
2320 		max_MBps = amdgpu_moverate;
2321 	else
2322 		max_MBps = 8; /* Allow 8 MB/s. */
2323 	/* Get a log2 for easy divisions. */
2324 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2325 
2326 	r = amdgpu_ib_pool_init(adev);
2327 	if (r) {
2328 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2329 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2330 		goto failed;
2331 	}
2332 
2333 	r = amdgpu_ib_ring_tests(adev);
2334 	if (r)
2335 		DRM_ERROR("ib ring test failed (%d).\n", r);
2336 
2337 	if (amdgpu_sriov_vf(adev))
2338 		amdgpu_virt_init_data_exchange(adev);
2339 
2340 	amdgpu_fbdev_init(adev);
2341 
2342 	r = amdgpu_pm_sysfs_init(adev);
2343 	if (r)
2344 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2345 
2346 	r = amdgpu_gem_debugfs_init(adev);
2347 	if (r)
2348 		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2349 
2350 	r = amdgpu_debugfs_regs_init(adev);
2351 	if (r)
2352 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
2353 
2354 	r = amdgpu_debugfs_test_ib_ring_init(adev);
2355 	if (r)
2356 		DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2357 
2358 	r = amdgpu_debugfs_firmware_init(adev);
2359 	if (r)
2360 		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2361 
2362 	r = amdgpu_debugfs_vbios_dump_init(adev);
2363 	if (r)
2364 		DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
2365 
2366 	if ((amdgpu_testing & 1)) {
2367 		if (adev->accel_working)
2368 			amdgpu_test_moves(adev);
2369 		else
2370 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2371 	}
2372 	if (amdgpu_benchmarking) {
2373 		if (adev->accel_working)
2374 			amdgpu_benchmark(adev, amdgpu_benchmarking);
2375 		else
2376 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2377 	}
2378 
2379 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
2380 	 * explicit gating rather than handling it automatically.
2381 	 */
2382 	r = amdgpu_late_init(adev);
2383 	if (r) {
2384 		dev_err(adev->dev, "amdgpu_late_init failed\n");
2385 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
2386 		goto failed;
2387 	}
2388 
2389 	return 0;
2390 
2391 failed:
2392 	amdgpu_vf_error_trans_all(adev);
2393 	if (runtime)
2394 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2395 	return r;
2396 }
2397 
2398 /**
2399  * amdgpu_device_fini - tear down the driver
2400  *
2401  * @adev: amdgpu_device pointer
2402  *
2403  * Tear down the driver info (all asics).
2404  * Called at driver shutdown.
2405  */
2406 void amdgpu_device_fini(struct amdgpu_device *adev)
2407 {
2408 	int r;
2409 
2410 	DRM_INFO("amdgpu: finishing device.\n");
2411 	adev->shutdown = true;
2412 	if (adev->mode_info.mode_config_initialized)
2413 		drm_crtc_force_disable_all(adev->ddev);
2414 	/* evict vram memory */
2415 	amdgpu_bo_evict_vram(adev);
2416 	amdgpu_ib_pool_fini(adev);
2417 	amdgpu_fw_reserve_vram_fini(adev);
2418 	amdgpu_fence_driver_fini(adev);
2419 	amdgpu_fbdev_fini(adev);
2420 	r = amdgpu_fini(adev);
2421 	if (adev->firmware.gpu_info_fw) {
2422 		release_firmware(adev->firmware.gpu_info_fw);
2423 		adev->firmware.gpu_info_fw = NULL;
2424 	}
2425 	adev->accel_working = false;
2426 	cancel_delayed_work_sync(&adev->late_init_work);
2427 	/* free i2c buses */
2428 	if (!amdgpu_device_has_dc_support(adev))
2429 		amdgpu_i2c_fini(adev);
2430 	amdgpu_atombios_fini(adev);
2431 	kfree(adev->bios);
2432 	adev->bios = NULL;
2433 	if (!pci_is_thunderbolt_attached(adev->pdev))
2434 		vga_switcheroo_unregister_client(adev->pdev);
2435 	if (adev->flags & AMD_IS_PX)
2436 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
2437 	vga_client_register(adev->pdev, NULL, NULL, NULL);
2438 	if (adev->rio_mem)
2439 		pci_iounmap(adev->pdev, adev->rio_mem);
2440 	adev->rio_mem = NULL;
2441 	iounmap(adev->rmmio);
2442 	adev->rmmio = NULL;
2443 	amdgpu_doorbell_fini(adev);
2444 	amdgpu_pm_sysfs_fini(adev);
2445 	amdgpu_debugfs_regs_cleanup(adev);
2446 }
2447 
2448 
2449 /*
2450  * Suspend & resume.
2451  */
2452 /**
2453  * amdgpu_device_suspend - initiate device suspend
2454  *
2455  * @pdev: drm dev pointer
2456  * @state: suspend state
2457  *
2458  * Puts the hw in the suspend state (all asics).
2459  * Returns 0 for success or an error on failure.
2460  * Called at driver suspend.
2461  */
2462 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2463 {
2464 	struct amdgpu_device *adev;
2465 	struct drm_crtc *crtc;
2466 	struct drm_connector *connector;
2467 	int r;
2468 
2469 	if (dev == NULL || dev->dev_private == NULL) {
2470 		return -ENODEV;
2471 	}
2472 
2473 	adev = dev->dev_private;
2474 
2475 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2476 		return 0;
2477 
2478 	drm_kms_helper_poll_disable(dev);
2479 
2480 	if (!amdgpu_device_has_dc_support(adev)) {
2481 		/* turn off display hw */
2482 		drm_modeset_lock_all(dev);
2483 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2484 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2485 		}
2486 		drm_modeset_unlock_all(dev);
2487 	}
2488 
2489 	amdgpu_amdkfd_suspend(adev);
2490 
2491 	/* unpin the front buffers and cursors */
2492 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2493 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2494 		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2495 		struct amdgpu_bo *robj;
2496 
2497 		if (amdgpu_crtc->cursor_bo) {
2498 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2499 			r = amdgpu_bo_reserve(aobj, true);
2500 			if (r == 0) {
2501 				amdgpu_bo_unpin(aobj);
2502 				amdgpu_bo_unreserve(aobj);
2503 			}
2504 		}
2505 
2506 		if (rfb == NULL || rfb->obj == NULL) {
2507 			continue;
2508 		}
2509 		robj = gem_to_amdgpu_bo(rfb->obj);
2510 		/* don't unpin kernel fb objects */
2511 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2512 			r = amdgpu_bo_reserve(robj, true);
2513 			if (r == 0) {
2514 				amdgpu_bo_unpin(robj);
2515 				amdgpu_bo_unreserve(robj);
2516 			}
2517 		}
2518 	}
2519 	/* evict vram memory */
2520 	amdgpu_bo_evict_vram(adev);
2521 
2522 	amdgpu_fence_driver_suspend(adev);
2523 
2524 	r = amdgpu_suspend(adev);
2525 
2526 	/* evict remaining vram memory
2527 	 * This second call to evict vram is to evict the gart page table
2528 	 * using the CPU.
2529 	 */
2530 	amdgpu_bo_evict_vram(adev);
2531 
2532 	amdgpu_atombios_scratch_regs_save(adev);
2533 	pci_save_state(dev->pdev);
2534 	if (suspend) {
2535 		/* Shut down the device */
2536 		pci_disable_device(dev->pdev);
2537 		pci_set_power_state(dev->pdev, PCI_D3hot);
2538 	} else {
2539 		r = amdgpu_asic_reset(adev);
2540 		if (r)
2541 			DRM_ERROR("amdgpu asic reset failed\n");
2542 	}
2543 
2544 	if (fbcon) {
2545 		console_lock();
2546 		amdgpu_fbdev_set_suspend(adev, 1);
2547 		console_unlock();
2548 	}
2549 	return 0;
2550 }
2551 
2552 /**
2553  * amdgpu_device_resume - initiate device resume
2554  *
2555  * @pdev: drm dev pointer
2556  *
2557  * Bring the hw back to operating state (all asics).
2558  * Returns 0 for success or an error on failure.
2559  * Called at driver resume.
2560  */
2561 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2562 {
2563 	struct drm_connector *connector;
2564 	struct amdgpu_device *adev = dev->dev_private;
2565 	struct drm_crtc *crtc;
2566 	int r = 0;
2567 
2568 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2569 		return 0;
2570 
2571 	if (fbcon)
2572 		console_lock();
2573 
2574 	if (resume) {
2575 		pci_set_power_state(dev->pdev, PCI_D0);
2576 		pci_restore_state(dev->pdev);
2577 		r = pci_enable_device(dev->pdev);
2578 		if (r)
2579 			goto unlock;
2580 	}
2581 	amdgpu_atombios_scratch_regs_restore(adev);
2582 
2583 	/* post card */
2584 	if (amdgpu_need_post(adev)) {
2585 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2586 		if (r)
2587 			DRM_ERROR("amdgpu asic init failed\n");
2588 	}
2589 
2590 	r = amdgpu_resume(adev);
2591 	if (r) {
2592 		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2593 		goto unlock;
2594 	}
2595 	amdgpu_fence_driver_resume(adev);
2596 
2597 	if (resume) {
2598 		r = amdgpu_ib_ring_tests(adev);
2599 		if (r)
2600 			DRM_ERROR("ib ring test failed (%d).\n", r);
2601 	}
2602 
2603 	r = amdgpu_late_init(adev);
2604 	if (r)
2605 		goto unlock;
2606 
2607 	/* pin cursors */
2608 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2609 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2610 
2611 		if (amdgpu_crtc->cursor_bo) {
2612 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2613 			r = amdgpu_bo_reserve(aobj, true);
2614 			if (r == 0) {
2615 				r = amdgpu_bo_pin(aobj,
2616 						  AMDGPU_GEM_DOMAIN_VRAM,
2617 						  &amdgpu_crtc->cursor_addr);
2618 				if (r != 0)
2619 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2620 				amdgpu_bo_unreserve(aobj);
2621 			}
2622 		}
2623 	}
2624 	r = amdgpu_amdkfd_resume(adev);
2625 	if (r)
2626 		return r;
2627 
2628 	/* blat the mode back in */
2629 	if (fbcon) {
2630 		if (!amdgpu_device_has_dc_support(adev)) {
2631 			/* pre DCE11 */
2632 			drm_helper_resume_force_mode(dev);
2633 
2634 			/* turn on display hw */
2635 			drm_modeset_lock_all(dev);
2636 			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2637 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2638 			}
2639 			drm_modeset_unlock_all(dev);
2640 		} else {
2641 			/*
2642 			 * There is no equivalent atomic helper to turn on
2643 			 * display, so we defined our own function for this,
2644 			 * once suspend resume is supported by the atomic
2645 			 * framework this will be reworked
2646 			 */
2647 			amdgpu_dm_display_resume(adev);
2648 		}
2649 	}
2650 
2651 	drm_kms_helper_poll_enable(dev);
2652 
2653 	/*
2654 	 * Most of the connector probing functions try to acquire runtime pm
2655 	 * refs to ensure that the GPU is powered on when connector polling is
2656 	 * performed. Since we're calling this from a runtime PM callback,
2657 	 * trying to acquire rpm refs will cause us to deadlock.
2658 	 *
2659 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
2660 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
2661 	 */
2662 #ifdef CONFIG_PM
2663 	dev->dev->power.disable_depth++;
2664 #endif
2665 	if (!amdgpu_device_has_dc_support(adev))
2666 		drm_helper_hpd_irq_event(dev);
2667 	else
2668 		drm_kms_helper_hotplug_event(dev);
2669 #ifdef CONFIG_PM
2670 	dev->dev->power.disable_depth--;
2671 #endif
2672 
2673 	if (fbcon)
2674 		amdgpu_fbdev_set_suspend(adev, 0);
2675 
2676 unlock:
2677 	if (fbcon)
2678 		console_unlock();
2679 
2680 	return r;
2681 }
2682 
2683 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2684 {
2685 	int i;
2686 	bool asic_hang = false;
2687 
2688 	if (amdgpu_sriov_vf(adev))
2689 		return true;
2690 
2691 	for (i = 0; i < adev->num_ip_blocks; i++) {
2692 		if (!adev->ip_blocks[i].status.valid)
2693 			continue;
2694 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2695 			adev->ip_blocks[i].status.hang =
2696 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2697 		if (adev->ip_blocks[i].status.hang) {
2698 			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2699 			asic_hang = true;
2700 		}
2701 	}
2702 	return asic_hang;
2703 }
2704 
2705 static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2706 {
2707 	int i, r = 0;
2708 
2709 	for (i = 0; i < adev->num_ip_blocks; i++) {
2710 		if (!adev->ip_blocks[i].status.valid)
2711 			continue;
2712 		if (adev->ip_blocks[i].status.hang &&
2713 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2714 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2715 			if (r)
2716 				return r;
2717 		}
2718 	}
2719 
2720 	return 0;
2721 }
2722 
2723 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2724 {
2725 	int i;
2726 
2727 	for (i = 0; i < adev->num_ip_blocks; i++) {
2728 		if (!adev->ip_blocks[i].status.valid)
2729 			continue;
2730 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2731 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2732 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2733 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2734 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2735 			if (adev->ip_blocks[i].status.hang) {
2736 				DRM_INFO("Some block need full reset!\n");
2737 				return true;
2738 			}
2739 		}
2740 	}
2741 	return false;
2742 }
2743 
2744 static int amdgpu_soft_reset(struct amdgpu_device *adev)
2745 {
2746 	int i, r = 0;
2747 
2748 	for (i = 0; i < adev->num_ip_blocks; i++) {
2749 		if (!adev->ip_blocks[i].status.valid)
2750 			continue;
2751 		if (adev->ip_blocks[i].status.hang &&
2752 		    adev->ip_blocks[i].version->funcs->soft_reset) {
2753 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2754 			if (r)
2755 				return r;
2756 		}
2757 	}
2758 
2759 	return 0;
2760 }
2761 
2762 static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2763 {
2764 	int i, r = 0;
2765 
2766 	for (i = 0; i < adev->num_ip_blocks; i++) {
2767 		if (!adev->ip_blocks[i].status.valid)
2768 			continue;
2769 		if (adev->ip_blocks[i].status.hang &&
2770 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
2771 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2772 		if (r)
2773 			return r;
2774 	}
2775 
2776 	return 0;
2777 }
2778 
2779 bool amdgpu_need_backup(struct amdgpu_device *adev)
2780 {
2781 	if (adev->flags & AMD_IS_APU)
2782 		return false;
2783 
2784 	return amdgpu_lockup_timeout > 0 ? true : false;
2785 }
2786 
2787 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2788 					   struct amdgpu_ring *ring,
2789 					   struct amdgpu_bo *bo,
2790 					   struct dma_fence **fence)
2791 {
2792 	uint32_t domain;
2793 	int r;
2794 
2795 	if (!bo->shadow)
2796 		return 0;
2797 
2798 	r = amdgpu_bo_reserve(bo, true);
2799 	if (r)
2800 		return r;
2801 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2802 	/* if bo has been evicted, then no need to recover */
2803 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2804 		r = amdgpu_bo_validate(bo->shadow);
2805 		if (r) {
2806 			DRM_ERROR("bo validate failed!\n");
2807 			goto err;
2808 		}
2809 
2810 		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2811 						 NULL, fence, true);
2812 		if (r) {
2813 			DRM_ERROR("recover page table failed!\n");
2814 			goto err;
2815 		}
2816 	}
2817 err:
2818 	amdgpu_bo_unreserve(bo);
2819 	return r;
2820 }
2821 
2822 /**
2823  * amdgpu_sriov_gpu_reset - reset the asic
2824  *
2825  * @adev: amdgpu device pointer
2826  * @job: which job trigger hang
2827  *
2828  * Attempt the reset the GPU if it has hung (all asics).
2829  * for SRIOV case.
2830  * Returns 0 for success or an error on failure.
2831  */
2832 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
2833 {
2834 	int i, j, r = 0;
2835 	int resched;
2836 	struct amdgpu_bo *bo, *tmp;
2837 	struct amdgpu_ring *ring;
2838 	struct dma_fence *fence = NULL, *next = NULL;
2839 
2840 	mutex_lock(&adev->virt.lock_reset);
2841 	atomic_inc(&adev->gpu_reset_counter);
2842 	adev->in_sriov_reset = true;
2843 
2844 	/* block TTM */
2845 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2846 
2847 	/* we start from the ring trigger GPU hang */
2848 	j = job ? job->ring->idx : 0;
2849 
2850 	/* block scheduler */
2851 	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2852 		ring = adev->rings[i % AMDGPU_MAX_RINGS];
2853 		if (!ring || !ring->sched.thread)
2854 			continue;
2855 
2856 		kthread_park(ring->sched.thread);
2857 
2858 		if (job && j != i)
2859 			continue;
2860 
2861 		/* here give the last chance to check if job removed from mirror-list
2862 		 * since we already pay some time on kthread_park */
2863 		if (job && list_empty(&job->base.node)) {
2864 			kthread_unpark(ring->sched.thread);
2865 			goto give_up_reset;
2866 		}
2867 
2868 		if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2869 			amd_sched_job_kickout(&job->base);
2870 
2871 		/* only do job_reset on the hang ring if @job not NULL */
2872 		amd_sched_hw_job_reset(&ring->sched);
2873 
2874 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2875 		amdgpu_fence_driver_force_completion_ring(ring);
2876 	}
2877 
2878 	/* request to take full control of GPU before re-initialization  */
2879 	if (job)
2880 		amdgpu_virt_reset_gpu(adev);
2881 	else
2882 		amdgpu_virt_request_full_gpu(adev, true);
2883 
2884 
2885 	/* Resume IP prior to SMC */
2886 	amdgpu_sriov_reinit_early(adev);
2887 
2888 	/* we need recover gart prior to run SMC/CP/SDMA resume */
2889 	amdgpu_ttm_recover_gart(adev);
2890 
2891 	/* now we are okay to resume SMC/CP/SDMA */
2892 	amdgpu_sriov_reinit_late(adev);
2893 
2894 	amdgpu_irq_gpu_reset_resume_helper(adev);
2895 
2896 	if (amdgpu_ib_ring_tests(adev))
2897 		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2898 
2899 	/* release full control of GPU after ib test */
2900 	amdgpu_virt_release_full_gpu(adev, true);
2901 
2902 	DRM_INFO("recover vram bo from shadow\n");
2903 
2904 	ring = adev->mman.buffer_funcs_ring;
2905 	mutex_lock(&adev->shadow_list_lock);
2906 	list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2907 		next = NULL;
2908 		amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2909 		if (fence) {
2910 			r = dma_fence_wait(fence, false);
2911 			if (r) {
2912 				WARN(r, "recovery from shadow isn't completed\n");
2913 				break;
2914 			}
2915 		}
2916 
2917 		dma_fence_put(fence);
2918 		fence = next;
2919 	}
2920 	mutex_unlock(&adev->shadow_list_lock);
2921 
2922 	if (fence) {
2923 		r = dma_fence_wait(fence, false);
2924 		if (r)
2925 			WARN(r, "recovery from shadow isn't completed\n");
2926 	}
2927 	dma_fence_put(fence);
2928 
2929 	for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2930 		ring = adev->rings[i % AMDGPU_MAX_RINGS];
2931 		if (!ring || !ring->sched.thread)
2932 			continue;
2933 
2934 		if (job && j != i) {
2935 			kthread_unpark(ring->sched.thread);
2936 			continue;
2937 		}
2938 
2939 		amd_sched_job_recovery(&ring->sched);
2940 		kthread_unpark(ring->sched.thread);
2941 	}
2942 
2943 	drm_helper_resume_force_mode(adev->ddev);
2944 give_up_reset:
2945 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2946 	if (r) {
2947 		/* bad news, how to tell it to userspace ? */
2948 		dev_info(adev->dev, "GPU reset failed\n");
2949 	} else {
2950 		dev_info(adev->dev, "GPU reset successed!\n");
2951 	}
2952 
2953 	adev->in_sriov_reset = false;
2954 	mutex_unlock(&adev->virt.lock_reset);
2955 	return r;
2956 }
2957 
2958 /**
2959  * amdgpu_gpu_reset - reset the asic
2960  *
2961  * @adev: amdgpu device pointer
2962  *
2963  * Attempt the reset the GPU if it has hung (all asics).
2964  * Returns 0 for success or an error on failure.
2965  */
2966 int amdgpu_gpu_reset(struct amdgpu_device *adev)
2967 {
2968 	struct drm_atomic_state *state = NULL;
2969 	int i, r;
2970 	int resched;
2971 	bool need_full_reset, vram_lost = false;
2972 
2973 	if (!amdgpu_check_soft_reset(adev)) {
2974 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2975 		return 0;
2976 	}
2977 
2978 	atomic_inc(&adev->gpu_reset_counter);
2979 
2980 	/* block TTM */
2981 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2982 	/* store modesetting */
2983 	if (amdgpu_device_has_dc_support(adev))
2984 		state = drm_atomic_helper_suspend(adev->ddev);
2985 
2986 	/* block scheduler */
2987 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2988 		struct amdgpu_ring *ring = adev->rings[i];
2989 
2990 		if (!ring || !ring->sched.thread)
2991 			continue;
2992 		kthread_park(ring->sched.thread);
2993 		amd_sched_hw_job_reset(&ring->sched);
2994 	}
2995 	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2996 	amdgpu_fence_driver_force_completion(adev);
2997 
2998 	need_full_reset = amdgpu_need_full_reset(adev);
2999 
3000 	if (!need_full_reset) {
3001 		amdgpu_pre_soft_reset(adev);
3002 		r = amdgpu_soft_reset(adev);
3003 		amdgpu_post_soft_reset(adev);
3004 		if (r || amdgpu_check_soft_reset(adev)) {
3005 			DRM_INFO("soft reset failed, will fallback to full reset!\n");
3006 			need_full_reset = true;
3007 		}
3008 	}
3009 
3010 	if (need_full_reset) {
3011 		r = amdgpu_suspend(adev);
3012 
3013 retry:
3014 		amdgpu_atombios_scratch_regs_save(adev);
3015 		r = amdgpu_asic_reset(adev);
3016 		amdgpu_atombios_scratch_regs_restore(adev);
3017 		/* post card */
3018 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
3019 
3020 		if (!r) {
3021 			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
3022 			r = amdgpu_resume_phase1(adev);
3023 			if (r)
3024 				goto out;
3025 			vram_lost = amdgpu_check_vram_lost(adev);
3026 			if (vram_lost) {
3027 				DRM_ERROR("VRAM is lost!\n");
3028 				atomic_inc(&adev->vram_lost_counter);
3029 			}
3030 			r = amdgpu_ttm_recover_gart(adev);
3031 			if (r)
3032 				goto out;
3033 			r = amdgpu_resume_phase2(adev);
3034 			if (r)
3035 				goto out;
3036 			if (vram_lost)
3037 				amdgpu_fill_reset_magic(adev);
3038 		}
3039 	}
3040 out:
3041 	if (!r) {
3042 		amdgpu_irq_gpu_reset_resume_helper(adev);
3043 		r = amdgpu_ib_ring_tests(adev);
3044 		if (r) {
3045 			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3046 			r = amdgpu_suspend(adev);
3047 			need_full_reset = true;
3048 			goto retry;
3049 		}
3050 		/**
3051 		 * recovery vm page tables, since we cannot depend on VRAM is
3052 		 * consistent after gpu full reset.
3053 		 */
3054 		if (need_full_reset && amdgpu_need_backup(adev)) {
3055 			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
3056 			struct amdgpu_bo *bo, *tmp;
3057 			struct dma_fence *fence = NULL, *next = NULL;
3058 
3059 			DRM_INFO("recover vram bo from shadow\n");
3060 			mutex_lock(&adev->shadow_list_lock);
3061 			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
3062 				next = NULL;
3063 				amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
3064 				if (fence) {
3065 					r = dma_fence_wait(fence, false);
3066 					if (r) {
3067 						WARN(r, "recovery from shadow isn't completed\n");
3068 						break;
3069 					}
3070 				}
3071 
3072 				dma_fence_put(fence);
3073 				fence = next;
3074 			}
3075 			mutex_unlock(&adev->shadow_list_lock);
3076 			if (fence) {
3077 				r = dma_fence_wait(fence, false);
3078 				if (r)
3079 					WARN(r, "recovery from shadow isn't completed\n");
3080 			}
3081 			dma_fence_put(fence);
3082 		}
3083 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3084 			struct amdgpu_ring *ring = adev->rings[i];
3085 
3086 			if (!ring || !ring->sched.thread)
3087 				continue;
3088 
3089 			amd_sched_job_recovery(&ring->sched);
3090 			kthread_unpark(ring->sched.thread);
3091 		}
3092 	} else {
3093 		dev_err(adev->dev, "asic resume failed (%d).\n", r);
3094 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3095 			if (adev->rings[i] && adev->rings[i]->sched.thread) {
3096 				kthread_unpark(adev->rings[i]->sched.thread);
3097 			}
3098 		}
3099 	}
3100 
3101 	if (amdgpu_device_has_dc_support(adev)) {
3102 		r = drm_atomic_helper_resume(adev->ddev, state);
3103 		amdgpu_dm_display_resume(adev);
3104 	} else
3105 		drm_helper_resume_force_mode(adev->ddev);
3106 
3107 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
3108 	if (r) {
3109 		/* bad news, how to tell it to userspace ? */
3110 		dev_info(adev->dev, "GPU reset failed\n");
3111 	}
3112 	else {
3113 		dev_info(adev->dev, "GPU reset successed!\n");
3114 	}
3115 
3116 	amdgpu_vf_error_trans_all(adev);
3117 	return r;
3118 }
3119 
3120 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
3121 {
3122 	u32 mask;
3123 	int ret;
3124 
3125 	if (amdgpu_pcie_gen_cap)
3126 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3127 
3128 	if (amdgpu_pcie_lane_cap)
3129 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3130 
3131 	/* covers APUs as well */
3132 	if (pci_is_root_bus(adev->pdev->bus)) {
3133 		if (adev->pm.pcie_gen_mask == 0)
3134 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3135 		if (adev->pm.pcie_mlw_mask == 0)
3136 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3137 		return;
3138 	}
3139 
3140 	if (adev->pm.pcie_gen_mask == 0) {
3141 		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3142 		if (!ret) {
3143 			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3144 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3145 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3146 
3147 			if (mask & DRM_PCIE_SPEED_25)
3148 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3149 			if (mask & DRM_PCIE_SPEED_50)
3150 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3151 			if (mask & DRM_PCIE_SPEED_80)
3152 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3153 		} else {
3154 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3155 		}
3156 	}
3157 	if (adev->pm.pcie_mlw_mask == 0) {
3158 		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3159 		if (!ret) {
3160 			switch (mask) {
3161 			case 32:
3162 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3163 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3164 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3165 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3166 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3167 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3168 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3169 				break;
3170 			case 16:
3171 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3172 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3173 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3174 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3175 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3176 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3177 				break;
3178 			case 12:
3179 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3180 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3181 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3182 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3183 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3184 				break;
3185 			case 8:
3186 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3187 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3188 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3189 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3190 				break;
3191 			case 4:
3192 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3193 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3194 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3195 				break;
3196 			case 2:
3197 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3198 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3199 				break;
3200 			case 1:
3201 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3202 				break;
3203 			default:
3204 				break;
3205 			}
3206 		} else {
3207 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3208 		}
3209 	}
3210 }
3211 
3212 /*
3213  * Debugfs
3214  */
3215 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
3216 			     const struct drm_info_list *files,
3217 			     unsigned nfiles)
3218 {
3219 	unsigned i;
3220 
3221 	for (i = 0; i < adev->debugfs_count; i++) {
3222 		if (adev->debugfs[i].files == files) {
3223 			/* Already registered */
3224 			return 0;
3225 		}
3226 	}
3227 
3228 	i = adev->debugfs_count + 1;
3229 	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3230 		DRM_ERROR("Reached maximum number of debugfs components.\n");
3231 		DRM_ERROR("Report so we increase "
3232 			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3233 		return -EINVAL;
3234 	}
3235 	adev->debugfs[adev->debugfs_count].files = files;
3236 	adev->debugfs[adev->debugfs_count].num_files = nfiles;
3237 	adev->debugfs_count = i;
3238 #if defined(CONFIG_DEBUG_FS)
3239 	drm_debugfs_create_files(files, nfiles,
3240 				 adev->ddev->primary->debugfs_root,
3241 				 adev->ddev->primary);
3242 #endif
3243 	return 0;
3244 }
3245 
3246 #if defined(CONFIG_DEBUG_FS)
3247 
3248 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3249 					size_t size, loff_t *pos)
3250 {
3251 	struct amdgpu_device *adev = file_inode(f)->i_private;
3252 	ssize_t result = 0;
3253 	int r;
3254 	bool pm_pg_lock, use_bank;
3255 	unsigned instance_bank, sh_bank, se_bank;
3256 
3257 	if (size & 0x3 || *pos & 0x3)
3258 		return -EINVAL;
3259 
3260 	/* are we reading registers for which a PG lock is necessary? */
3261 	pm_pg_lock = (*pos >> 23) & 1;
3262 
3263 	if (*pos & (1ULL << 62)) {
3264 		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3265 		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3266 		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
3267 
3268 		if (se_bank == 0x3FF)
3269 			se_bank = 0xFFFFFFFF;
3270 		if (sh_bank == 0x3FF)
3271 			sh_bank = 0xFFFFFFFF;
3272 		if (instance_bank == 0x3FF)
3273 			instance_bank = 0xFFFFFFFF;
3274 		use_bank = 1;
3275 	} else {
3276 		use_bank = 0;
3277 	}
3278 
3279 	*pos &= (1UL << 22) - 1;
3280 
3281 	if (use_bank) {
3282 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3283 		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3284 			return -EINVAL;
3285 		mutex_lock(&adev->grbm_idx_mutex);
3286 		amdgpu_gfx_select_se_sh(adev, se_bank,
3287 					sh_bank, instance_bank);
3288 	}
3289 
3290 	if (pm_pg_lock)
3291 		mutex_lock(&adev->pm.mutex);
3292 
3293 	while (size) {
3294 		uint32_t value;
3295 
3296 		if (*pos > adev->rmmio_size)
3297 			goto end;
3298 
3299 		value = RREG32(*pos >> 2);
3300 		r = put_user(value, (uint32_t *)buf);
3301 		if (r) {
3302 			result = r;
3303 			goto end;
3304 		}
3305 
3306 		result += 4;
3307 		buf += 4;
3308 		*pos += 4;
3309 		size -= 4;
3310 	}
3311 
3312 end:
3313 	if (use_bank) {
3314 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3315 		mutex_unlock(&adev->grbm_idx_mutex);
3316 	}
3317 
3318 	if (pm_pg_lock)
3319 		mutex_unlock(&adev->pm.mutex);
3320 
3321 	return result;
3322 }
3323 
3324 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3325 					 size_t size, loff_t *pos)
3326 {
3327 	struct amdgpu_device *adev = file_inode(f)->i_private;
3328 	ssize_t result = 0;
3329 	int r;
3330 	bool pm_pg_lock, use_bank;
3331 	unsigned instance_bank, sh_bank, se_bank;
3332 
3333 	if (size & 0x3 || *pos & 0x3)
3334 		return -EINVAL;
3335 
3336 	/* are we reading registers for which a PG lock is necessary? */
3337 	pm_pg_lock = (*pos >> 23) & 1;
3338 
3339 	if (*pos & (1ULL << 62)) {
3340 		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
3341 		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
3342 		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
3343 
3344 		if (se_bank == 0x3FF)
3345 			se_bank = 0xFFFFFFFF;
3346 		if (sh_bank == 0x3FF)
3347 			sh_bank = 0xFFFFFFFF;
3348 		if (instance_bank == 0x3FF)
3349 			instance_bank = 0xFFFFFFFF;
3350 		use_bank = 1;
3351 	} else {
3352 		use_bank = 0;
3353 	}
3354 
3355 	*pos &= (1UL << 22) - 1;
3356 
3357 	if (use_bank) {
3358 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3359 		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3360 			return -EINVAL;
3361 		mutex_lock(&adev->grbm_idx_mutex);
3362 		amdgpu_gfx_select_se_sh(adev, se_bank,
3363 					sh_bank, instance_bank);
3364 	}
3365 
3366 	if (pm_pg_lock)
3367 		mutex_lock(&adev->pm.mutex);
3368 
3369 	while (size) {
3370 		uint32_t value;
3371 
3372 		if (*pos > adev->rmmio_size)
3373 			return result;
3374 
3375 		r = get_user(value, (uint32_t *)buf);
3376 		if (r)
3377 			return r;
3378 
3379 		WREG32(*pos >> 2, value);
3380 
3381 		result += 4;
3382 		buf += 4;
3383 		*pos += 4;
3384 		size -= 4;
3385 	}
3386 
3387 	if (use_bank) {
3388 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3389 		mutex_unlock(&adev->grbm_idx_mutex);
3390 	}
3391 
3392 	if (pm_pg_lock)
3393 		mutex_unlock(&adev->pm.mutex);
3394 
3395 	return result;
3396 }
3397 
3398 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3399 					size_t size, loff_t *pos)
3400 {
3401 	struct amdgpu_device *adev = file_inode(f)->i_private;
3402 	ssize_t result = 0;
3403 	int r;
3404 
3405 	if (size & 0x3 || *pos & 0x3)
3406 		return -EINVAL;
3407 
3408 	while (size) {
3409 		uint32_t value;
3410 
3411 		value = RREG32_PCIE(*pos >> 2);
3412 		r = put_user(value, (uint32_t *)buf);
3413 		if (r)
3414 			return r;
3415 
3416 		result += 4;
3417 		buf += 4;
3418 		*pos += 4;
3419 		size -= 4;
3420 	}
3421 
3422 	return result;
3423 }
3424 
3425 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3426 					 size_t size, loff_t *pos)
3427 {
3428 	struct amdgpu_device *adev = file_inode(f)->i_private;
3429 	ssize_t result = 0;
3430 	int r;
3431 
3432 	if (size & 0x3 || *pos & 0x3)
3433 		return -EINVAL;
3434 
3435 	while (size) {
3436 		uint32_t value;
3437 
3438 		r = get_user(value, (uint32_t *)buf);
3439 		if (r)
3440 			return r;
3441 
3442 		WREG32_PCIE(*pos >> 2, value);
3443 
3444 		result += 4;
3445 		buf += 4;
3446 		*pos += 4;
3447 		size -= 4;
3448 	}
3449 
3450 	return result;
3451 }
3452 
3453 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3454 					size_t size, loff_t *pos)
3455 {
3456 	struct amdgpu_device *adev = file_inode(f)->i_private;
3457 	ssize_t result = 0;
3458 	int r;
3459 
3460 	if (size & 0x3 || *pos & 0x3)
3461 		return -EINVAL;
3462 
3463 	while (size) {
3464 		uint32_t value;
3465 
3466 		value = RREG32_DIDT(*pos >> 2);
3467 		r = put_user(value, (uint32_t *)buf);
3468 		if (r)
3469 			return r;
3470 
3471 		result += 4;
3472 		buf += 4;
3473 		*pos += 4;
3474 		size -= 4;
3475 	}
3476 
3477 	return result;
3478 }
3479 
3480 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3481 					 size_t size, loff_t *pos)
3482 {
3483 	struct amdgpu_device *adev = file_inode(f)->i_private;
3484 	ssize_t result = 0;
3485 	int r;
3486 
3487 	if (size & 0x3 || *pos & 0x3)
3488 		return -EINVAL;
3489 
3490 	while (size) {
3491 		uint32_t value;
3492 
3493 		r = get_user(value, (uint32_t *)buf);
3494 		if (r)
3495 			return r;
3496 
3497 		WREG32_DIDT(*pos >> 2, value);
3498 
3499 		result += 4;
3500 		buf += 4;
3501 		*pos += 4;
3502 		size -= 4;
3503 	}
3504 
3505 	return result;
3506 }
3507 
3508 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3509 					size_t size, loff_t *pos)
3510 {
3511 	struct amdgpu_device *adev = file_inode(f)->i_private;
3512 	ssize_t result = 0;
3513 	int r;
3514 
3515 	if (size & 0x3 || *pos & 0x3)
3516 		return -EINVAL;
3517 
3518 	while (size) {
3519 		uint32_t value;
3520 
3521 		value = RREG32_SMC(*pos);
3522 		r = put_user(value, (uint32_t *)buf);
3523 		if (r)
3524 			return r;
3525 
3526 		result += 4;
3527 		buf += 4;
3528 		*pos += 4;
3529 		size -= 4;
3530 	}
3531 
3532 	return result;
3533 }
3534 
3535 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3536 					 size_t size, loff_t *pos)
3537 {
3538 	struct amdgpu_device *adev = file_inode(f)->i_private;
3539 	ssize_t result = 0;
3540 	int r;
3541 
3542 	if (size & 0x3 || *pos & 0x3)
3543 		return -EINVAL;
3544 
3545 	while (size) {
3546 		uint32_t value;
3547 
3548 		r = get_user(value, (uint32_t *)buf);
3549 		if (r)
3550 			return r;
3551 
3552 		WREG32_SMC(*pos, value);
3553 
3554 		result += 4;
3555 		buf += 4;
3556 		*pos += 4;
3557 		size -= 4;
3558 	}
3559 
3560 	return result;
3561 }
3562 
3563 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3564 					size_t size, loff_t *pos)
3565 {
3566 	struct amdgpu_device *adev = file_inode(f)->i_private;
3567 	ssize_t result = 0;
3568 	int r;
3569 	uint32_t *config, no_regs = 0;
3570 
3571 	if (size & 0x3 || *pos & 0x3)
3572 		return -EINVAL;
3573 
3574 	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
3575 	if (!config)
3576 		return -ENOMEM;
3577 
3578 	/* version, increment each time something is added */
3579 	config[no_regs++] = 3;
3580 	config[no_regs++] = adev->gfx.config.max_shader_engines;
3581 	config[no_regs++] = adev->gfx.config.max_tile_pipes;
3582 	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3583 	config[no_regs++] = adev->gfx.config.max_sh_per_se;
3584 	config[no_regs++] = adev->gfx.config.max_backends_per_se;
3585 	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3586 	config[no_regs++] = adev->gfx.config.max_gprs;
3587 	config[no_regs++] = adev->gfx.config.max_gs_threads;
3588 	config[no_regs++] = adev->gfx.config.max_hw_contexts;
3589 	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3590 	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3591 	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3592 	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3593 	config[no_regs++] = adev->gfx.config.num_tile_pipes;
3594 	config[no_regs++] = adev->gfx.config.backend_enable_mask;
3595 	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3596 	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3597 	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3598 	config[no_regs++] = adev->gfx.config.num_gpus;
3599 	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3600 	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3601 	config[no_regs++] = adev->gfx.config.gb_addr_config;
3602 	config[no_regs++] = adev->gfx.config.num_rbs;
3603 
3604 	/* rev==1 */
3605 	config[no_regs++] = adev->rev_id;
3606 	config[no_regs++] = adev->pg_flags;
3607 	config[no_regs++] = adev->cg_flags;
3608 
3609 	/* rev==2 */
3610 	config[no_regs++] = adev->family;
3611 	config[no_regs++] = adev->external_rev_id;
3612 
3613 	/* rev==3 */
3614 	config[no_regs++] = adev->pdev->device;
3615 	config[no_regs++] = adev->pdev->revision;
3616 	config[no_regs++] = adev->pdev->subsystem_device;
3617 	config[no_regs++] = adev->pdev->subsystem_vendor;
3618 
3619 	while (size && (*pos < no_regs * 4)) {
3620 		uint32_t value;
3621 
3622 		value = config[*pos >> 2];
3623 		r = put_user(value, (uint32_t *)buf);
3624 		if (r) {
3625 			kfree(config);
3626 			return r;
3627 		}
3628 
3629 		result += 4;
3630 		buf += 4;
3631 		*pos += 4;
3632 		size -= 4;
3633 	}
3634 
3635 	kfree(config);
3636 	return result;
3637 }
3638 
3639 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3640 					size_t size, loff_t *pos)
3641 {
3642 	struct amdgpu_device *adev = file_inode(f)->i_private;
3643 	int idx, x, outsize, r, valuesize;
3644 	uint32_t values[16];
3645 
3646 	if (size & 3 || *pos & 0x3)
3647 		return -EINVAL;
3648 
3649 	if (amdgpu_dpm == 0)
3650 		return -EINVAL;
3651 
3652 	/* convert offset to sensor number */
3653 	idx = *pos >> 2;
3654 
3655 	valuesize = sizeof(values);
3656 	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
3657 		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
3658 	else
3659 		return -EINVAL;
3660 
3661 	if (size > valuesize)
3662 		return -EINVAL;
3663 
3664 	outsize = 0;
3665 	x = 0;
3666 	if (!r) {
3667 		while (size) {
3668 			r = put_user(values[x++], (int32_t *)buf);
3669 			buf += 4;
3670 			size -= 4;
3671 			outsize += 4;
3672 		}
3673 	}
3674 
3675 	return !r ? outsize : r;
3676 }
3677 
3678 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3679 					size_t size, loff_t *pos)
3680 {
3681 	struct amdgpu_device *adev = f->f_inode->i_private;
3682 	int r, x;
3683 	ssize_t result=0;
3684 	uint32_t offset, se, sh, cu, wave, simd, data[32];
3685 
3686 	if (size & 3 || *pos & 3)
3687 		return -EINVAL;
3688 
3689 	/* decode offset */
3690 	offset = (*pos & GENMASK_ULL(6, 0));
3691 	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
3692 	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
3693 	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
3694 	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
3695 	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
3696 
3697 	/* switch to the specific se/sh/cu */
3698 	mutex_lock(&adev->grbm_idx_mutex);
3699 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3700 
3701 	x = 0;
3702 	if (adev->gfx.funcs->read_wave_data)
3703 		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
3704 
3705 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3706 	mutex_unlock(&adev->grbm_idx_mutex);
3707 
3708 	if (!x)
3709 		return -EINVAL;
3710 
3711 	while (size && (offset < x * 4)) {
3712 		uint32_t value;
3713 
3714 		value = data[offset >> 2];
3715 		r = put_user(value, (uint32_t *)buf);
3716 		if (r)
3717 			return r;
3718 
3719 		result += 4;
3720 		buf += 4;
3721 		offset += 4;
3722 		size -= 4;
3723 	}
3724 
3725 	return result;
3726 }
3727 
3728 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3729 					size_t size, loff_t *pos)
3730 {
3731 	struct amdgpu_device *adev = f->f_inode->i_private;
3732 	int r;
3733 	ssize_t result = 0;
3734 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
3735 
3736 	if (size & 3 || *pos & 3)
3737 		return -EINVAL;
3738 
3739 	/* decode offset */
3740 	offset = *pos & GENMASK_ULL(11, 0);
3741 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
3742 	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
3743 	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
3744 	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
3745 	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
3746 	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
3747 	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
3748 
3749 	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3750 	if (!data)
3751 		return -ENOMEM;
3752 
3753 	/* switch to the specific se/sh/cu */
3754 	mutex_lock(&adev->grbm_idx_mutex);
3755 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3756 
3757 	if (bank == 0) {
3758 		if (adev->gfx.funcs->read_wave_vgprs)
3759 			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3760 	} else {
3761 		if (adev->gfx.funcs->read_wave_sgprs)
3762 			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3763 	}
3764 
3765 	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3766 	mutex_unlock(&adev->grbm_idx_mutex);
3767 
3768 	while (size) {
3769 		uint32_t value;
3770 
3771 		value = data[offset++];
3772 		r = put_user(value, (uint32_t *)buf);
3773 		if (r) {
3774 			result = r;
3775 			goto err;
3776 		}
3777 
3778 		result += 4;
3779 		buf += 4;
3780 		size -= 4;
3781 	}
3782 
3783 err:
3784 	kfree(data);
3785 	return result;
3786 }
3787 
3788 static const struct file_operations amdgpu_debugfs_regs_fops = {
3789 	.owner = THIS_MODULE,
3790 	.read = amdgpu_debugfs_regs_read,
3791 	.write = amdgpu_debugfs_regs_write,
3792 	.llseek = default_llseek
3793 };
3794 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3795 	.owner = THIS_MODULE,
3796 	.read = amdgpu_debugfs_regs_didt_read,
3797 	.write = amdgpu_debugfs_regs_didt_write,
3798 	.llseek = default_llseek
3799 };
3800 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3801 	.owner = THIS_MODULE,
3802 	.read = amdgpu_debugfs_regs_pcie_read,
3803 	.write = amdgpu_debugfs_regs_pcie_write,
3804 	.llseek = default_llseek
3805 };
3806 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3807 	.owner = THIS_MODULE,
3808 	.read = amdgpu_debugfs_regs_smc_read,
3809 	.write = amdgpu_debugfs_regs_smc_write,
3810 	.llseek = default_llseek
3811 };
3812 
3813 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3814 	.owner = THIS_MODULE,
3815 	.read = amdgpu_debugfs_gca_config_read,
3816 	.llseek = default_llseek
3817 };
3818 
3819 static const struct file_operations amdgpu_debugfs_sensors_fops = {
3820 	.owner = THIS_MODULE,
3821 	.read = amdgpu_debugfs_sensor_read,
3822 	.llseek = default_llseek
3823 };
3824 
3825 static const struct file_operations amdgpu_debugfs_wave_fops = {
3826 	.owner = THIS_MODULE,
3827 	.read = amdgpu_debugfs_wave_read,
3828 	.llseek = default_llseek
3829 };
3830 static const struct file_operations amdgpu_debugfs_gpr_fops = {
3831 	.owner = THIS_MODULE,
3832 	.read = amdgpu_debugfs_gpr_read,
3833 	.llseek = default_llseek
3834 };
3835 
3836 static const struct file_operations *debugfs_regs[] = {
3837 	&amdgpu_debugfs_regs_fops,
3838 	&amdgpu_debugfs_regs_didt_fops,
3839 	&amdgpu_debugfs_regs_pcie_fops,
3840 	&amdgpu_debugfs_regs_smc_fops,
3841 	&amdgpu_debugfs_gca_config_fops,
3842 	&amdgpu_debugfs_sensors_fops,
3843 	&amdgpu_debugfs_wave_fops,
3844 	&amdgpu_debugfs_gpr_fops,
3845 };
3846 
3847 static const char *debugfs_regs_names[] = {
3848 	"amdgpu_regs",
3849 	"amdgpu_regs_didt",
3850 	"amdgpu_regs_pcie",
3851 	"amdgpu_regs_smc",
3852 	"amdgpu_gca_config",
3853 	"amdgpu_sensors",
3854 	"amdgpu_wave",
3855 	"amdgpu_gpr",
3856 };
3857 
3858 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3859 {
3860 	struct drm_minor *minor = adev->ddev->primary;
3861 	struct dentry *ent, *root = minor->debugfs_root;
3862 	unsigned i, j;
3863 
3864 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3865 		ent = debugfs_create_file(debugfs_regs_names[i],
3866 					  S_IFREG | S_IRUGO, root,
3867 					  adev, debugfs_regs[i]);
3868 		if (IS_ERR(ent)) {
3869 			for (j = 0; j < i; j++) {
3870 				debugfs_remove(adev->debugfs_regs[i]);
3871 				adev->debugfs_regs[i] = NULL;
3872 			}
3873 			return PTR_ERR(ent);
3874 		}
3875 
3876 		if (!i)
3877 			i_size_write(ent->d_inode, adev->rmmio_size);
3878 		adev->debugfs_regs[i] = ent;
3879 	}
3880 
3881 	return 0;
3882 }
3883 
3884 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3885 {
3886 	unsigned i;
3887 
3888 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3889 		if (adev->debugfs_regs[i]) {
3890 			debugfs_remove(adev->debugfs_regs[i]);
3891 			adev->debugfs_regs[i] = NULL;
3892 		}
3893 	}
3894 }
3895 
3896 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3897 {
3898 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3899 	struct drm_device *dev = node->minor->dev;
3900 	struct amdgpu_device *adev = dev->dev_private;
3901 	int r = 0, i;
3902 
3903 	/* hold on the scheduler */
3904 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3905 		struct amdgpu_ring *ring = adev->rings[i];
3906 
3907 		if (!ring || !ring->sched.thread)
3908 			continue;
3909 		kthread_park(ring->sched.thread);
3910 	}
3911 
3912 	seq_printf(m, "run ib test:\n");
3913 	r = amdgpu_ib_ring_tests(adev);
3914 	if (r)
3915 		seq_printf(m, "ib ring tests failed (%d).\n", r);
3916 	else
3917 		seq_printf(m, "ib ring tests passed.\n");
3918 
3919 	/* go on the scheduler */
3920 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3921 		struct amdgpu_ring *ring = adev->rings[i];
3922 
3923 		if (!ring || !ring->sched.thread)
3924 			continue;
3925 		kthread_unpark(ring->sched.thread);
3926 	}
3927 
3928 	return 0;
3929 }
3930 
3931 static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3932 	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3933 };
3934 
3935 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3936 {
3937 	return amdgpu_debugfs_add_files(adev,
3938 					amdgpu_debugfs_test_ib_ring_list, 1);
3939 }
3940 
3941 int amdgpu_debugfs_init(struct drm_minor *minor)
3942 {
3943 	return 0;
3944 }
3945 
3946 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
3947 {
3948 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3949 	struct drm_device *dev = node->minor->dev;
3950 	struct amdgpu_device *adev = dev->dev_private;
3951 
3952 	seq_write(m, adev->bios, adev->bios_size);
3953 	return 0;
3954 }
3955 
3956 static const struct drm_info_list amdgpu_vbios_dump_list[] = {
3957 		{"amdgpu_vbios",
3958 		 amdgpu_debugfs_get_vbios_dump,
3959 		 0, NULL},
3960 };
3961 
3962 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3963 {
3964 	return amdgpu_debugfs_add_files(adev,
3965 					amdgpu_vbios_dump_list, 1);
3966 }
3967 #else
3968 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3969 {
3970 	return 0;
3971 }
3972 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3973 {
3974 	return 0;
3975 }
3976 static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
3977 {
3978 	return 0;
3979 }
3980 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
3981 #endif
3982