1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60 
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63 
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
69 
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
73 
74 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
75 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
76 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
85 
86 #define AMDGPU_RESUME_MS		2000
87 
88 const char *amdgpu_asic_name[] = {
89 	"TAHITI",
90 	"PITCAIRN",
91 	"VERDE",
92 	"OLAND",
93 	"HAINAN",
94 	"BONAIRE",
95 	"KAVERI",
96 	"KABINI",
97 	"HAWAII",
98 	"MULLINS",
99 	"TOPAZ",
100 	"TONGA",
101 	"FIJI",
102 	"CARRIZO",
103 	"STONEY",
104 	"POLARIS10",
105 	"POLARIS11",
106 	"POLARIS12",
107 	"VEGAM",
108 	"VEGA10",
109 	"VEGA12",
110 	"VEGA20",
111 	"RAVEN",
112 	"ARCTURUS",
113 	"RENOIR",
114 	"ALDEBARAN",
115 	"NAVI10",
116 	"NAVI14",
117 	"NAVI12",
118 	"SIENNA_CICHLID",
119 	"NAVY_FLOUNDER",
120 	"VANGOGH",
121 	"DIMGREY_CAVEFISH",
122 	"LAST",
123 };
124 
125 /**
126  * DOC: pcie_replay_count
127  *
128  * The amdgpu driver provides a sysfs API for reporting the total number
129  * of PCIe replays (NAKs)
130  * The file pcie_replay_count is used for this and returns the total
131  * number of replays as a sum of the NAKs generated and NAKs received
132  */
133 
134 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
135 		struct device_attribute *attr, char *buf)
136 {
137 	struct drm_device *ddev = dev_get_drvdata(dev);
138 	struct amdgpu_device *adev = drm_to_adev(ddev);
139 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
140 
141 	return sysfs_emit(buf, "%llu\n", cnt);
142 }
143 
144 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
145 		amdgpu_device_get_pcie_replay_count, NULL);
146 
147 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
148 
149 /**
150  * DOC: product_name
151  *
152  * The amdgpu driver provides a sysfs API for reporting the product name
153  * for the device
154  * The file serial_number is used for this and returns the product name
155  * as returned from the FRU.
156  * NOTE: This is only available for certain server cards
157  */
158 
159 static ssize_t amdgpu_device_get_product_name(struct device *dev,
160 		struct device_attribute *attr, char *buf)
161 {
162 	struct drm_device *ddev = dev_get_drvdata(dev);
163 	struct amdgpu_device *adev = drm_to_adev(ddev);
164 
165 	return sysfs_emit(buf, "%s\n", adev->product_name);
166 }
167 
168 static DEVICE_ATTR(product_name, S_IRUGO,
169 		amdgpu_device_get_product_name, NULL);
170 
171 /**
172  * DOC: product_number
173  *
174  * The amdgpu driver provides a sysfs API for reporting the part number
175  * for the device
176  * The file serial_number is used for this and returns the part number
177  * as returned from the FRU.
178  * NOTE: This is only available for certain server cards
179  */
180 
181 static ssize_t amdgpu_device_get_product_number(struct device *dev,
182 		struct device_attribute *attr, char *buf)
183 {
184 	struct drm_device *ddev = dev_get_drvdata(dev);
185 	struct amdgpu_device *adev = drm_to_adev(ddev);
186 
187 	return sysfs_emit(buf, "%s\n", adev->product_number);
188 }
189 
190 static DEVICE_ATTR(product_number, S_IRUGO,
191 		amdgpu_device_get_product_number, NULL);
192 
193 /**
194  * DOC: serial_number
195  *
196  * The amdgpu driver provides a sysfs API for reporting the serial number
197  * for the device
198  * The file serial_number is used for this and returns the serial number
199  * as returned from the FRU.
200  * NOTE: This is only available for certain server cards
201  */
202 
203 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
204 		struct device_attribute *attr, char *buf)
205 {
206 	struct drm_device *ddev = dev_get_drvdata(dev);
207 	struct amdgpu_device *adev = drm_to_adev(ddev);
208 
209 	return sysfs_emit(buf, "%s\n", adev->serial);
210 }
211 
212 static DEVICE_ATTR(serial_number, S_IRUGO,
213 		amdgpu_device_get_serial_number, NULL);
214 
215 /**
216  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
217  *
218  * @dev: drm_device pointer
219  *
220  * Returns true if the device is a dGPU with ATPX power control,
221  * otherwise return false.
222  */
223 bool amdgpu_device_supports_px(struct drm_device *dev)
224 {
225 	struct amdgpu_device *adev = drm_to_adev(dev);
226 
227 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
228 		return true;
229 	return false;
230 }
231 
232 /**
233  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
234  *
235  * @dev: drm_device pointer
236  *
237  * Returns true if the device is a dGPU with ACPI power control,
238  * otherwise return false.
239  */
240 bool amdgpu_device_supports_boco(struct drm_device *dev)
241 {
242 	struct amdgpu_device *adev = drm_to_adev(dev);
243 
244 	if (adev->has_pr3 ||
245 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
246 		return true;
247 	return false;
248 }
249 
250 /**
251  * amdgpu_device_supports_baco - Does the device support BACO
252  *
253  * @dev: drm_device pointer
254  *
255  * Returns true if the device supporte BACO,
256  * otherwise return false.
257  */
258 bool amdgpu_device_supports_baco(struct drm_device *dev)
259 {
260 	struct amdgpu_device *adev = drm_to_adev(dev);
261 
262 	return amdgpu_asic_supports_baco(adev);
263 }
264 
265 /*
266  * VRAM access helper functions
267  */
268 
269 /**
270  * amdgpu_device_vram_access - read/write a buffer in vram
271  *
272  * @adev: amdgpu_device pointer
273  * @pos: offset of the buffer in vram
274  * @buf: virtual address of the buffer in system memory
275  * @size: read/write size, sizeof(@buf) must > @size
276  * @write: true - write to vram, otherwise - read from vram
277  */
278 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
279 			       uint32_t *buf, size_t size, bool write)
280 {
281 	unsigned long flags;
282 	uint32_t hi = ~0;
283 	uint64_t last;
284 
285 
286 #ifdef CONFIG_64BIT
287 	last = min(pos + size, adev->gmc.visible_vram_size);
288 	if (last > pos) {
289 		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
290 		size_t count = last - pos;
291 
292 		if (write) {
293 			memcpy_toio(addr, buf, count);
294 			mb();
295 			amdgpu_asic_flush_hdp(adev, NULL);
296 		} else {
297 			amdgpu_asic_invalidate_hdp(adev, NULL);
298 			mb();
299 			memcpy_fromio(buf, addr, count);
300 		}
301 
302 		if (count == size)
303 			return;
304 
305 		pos += count;
306 		buf += count / 4;
307 		size -= count;
308 	}
309 #endif
310 
311 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
312 	for (last = pos + size; pos < last; pos += 4) {
313 		uint32_t tmp = pos >> 31;
314 
315 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
316 		if (tmp != hi) {
317 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
318 			hi = tmp;
319 		}
320 		if (write)
321 			WREG32_NO_KIQ(mmMM_DATA, *buf++);
322 		else
323 			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
324 	}
325 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
326 }
327 
328 /*
329  * register access helper functions.
330  */
331 
332 /* Check if hw access should be skipped because of hotplug or device error */
333 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
334 {
335 	if (adev->in_pci_err_recovery)
336 		return true;
337 
338 #ifdef CONFIG_LOCKDEP
339 	/*
340 	 * This is a bit complicated to understand, so worth a comment. What we assert
341 	 * here is that the GPU reset is not running on another thread in parallel.
342 	 *
343 	 * For this we trylock the read side of the reset semaphore, if that succeeds
344 	 * we know that the reset is not running in paralell.
345 	 *
346 	 * If the trylock fails we assert that we are either already holding the read
347 	 * side of the lock or are the reset thread itself and hold the write side of
348 	 * the lock.
349 	 */
350 	if (in_task()) {
351 		if (down_read_trylock(&adev->reset_sem))
352 			up_read(&adev->reset_sem);
353 		else
354 			lockdep_assert_held(&adev->reset_sem);
355 	}
356 #endif
357 	return false;
358 }
359 
360 /**
361  * amdgpu_device_rreg - read a memory mapped IO or indirect register
362  *
363  * @adev: amdgpu_device pointer
364  * @reg: dword aligned register offset
365  * @acc_flags: access flags which require special behavior
366  *
367  * Returns the 32 bit value from the offset specified.
368  */
369 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
370 			    uint32_t reg, uint32_t acc_flags)
371 {
372 	uint32_t ret;
373 
374 	if (amdgpu_device_skip_hw_access(adev))
375 		return 0;
376 
377 	if ((reg * 4) < adev->rmmio_size) {
378 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
379 		    amdgpu_sriov_runtime(adev) &&
380 		    down_read_trylock(&adev->reset_sem)) {
381 			ret = amdgpu_kiq_rreg(adev, reg);
382 			up_read(&adev->reset_sem);
383 		} else {
384 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
385 		}
386 	} else {
387 		ret = adev->pcie_rreg(adev, reg * 4);
388 	}
389 
390 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
391 
392 	return ret;
393 }
394 
395 /*
396  * MMIO register read with bytes helper functions
397  * @offset:bytes offset from MMIO start
398  *
399 */
400 
401 /**
402  * amdgpu_mm_rreg8 - read a memory mapped IO register
403  *
404  * @adev: amdgpu_device pointer
405  * @offset: byte aligned register offset
406  *
407  * Returns the 8 bit value from the offset specified.
408  */
409 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
410 {
411 	if (amdgpu_device_skip_hw_access(adev))
412 		return 0;
413 
414 	if (offset < adev->rmmio_size)
415 		return (readb(adev->rmmio + offset));
416 	BUG();
417 }
418 
419 /*
420  * MMIO register write with bytes helper functions
421  * @offset:bytes offset from MMIO start
422  * @value: the value want to be written to the register
423  *
424 */
425 /**
426  * amdgpu_mm_wreg8 - read a memory mapped IO register
427  *
428  * @adev: amdgpu_device pointer
429  * @offset: byte aligned register offset
430  * @value: 8 bit value to write
431  *
432  * Writes the value specified to the offset specified.
433  */
434 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
435 {
436 	if (amdgpu_device_skip_hw_access(adev))
437 		return;
438 
439 	if (offset < adev->rmmio_size)
440 		writeb(value, adev->rmmio + offset);
441 	else
442 		BUG();
443 }
444 
445 /**
446  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
447  *
448  * @adev: amdgpu_device pointer
449  * @reg: dword aligned register offset
450  * @v: 32 bit value to write to the register
451  * @acc_flags: access flags which require special behavior
452  *
453  * Writes the value specified to the offset specified.
454  */
455 void amdgpu_device_wreg(struct amdgpu_device *adev,
456 			uint32_t reg, uint32_t v,
457 			uint32_t acc_flags)
458 {
459 	if (amdgpu_device_skip_hw_access(adev))
460 		return;
461 
462 	if ((reg * 4) < adev->rmmio_size) {
463 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
464 		    amdgpu_sriov_runtime(adev) &&
465 		    down_read_trylock(&adev->reset_sem)) {
466 			amdgpu_kiq_wreg(adev, reg, v);
467 			up_read(&adev->reset_sem);
468 		} else {
469 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
470 		}
471 	} else {
472 		adev->pcie_wreg(adev, reg * 4, v);
473 	}
474 
475 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
476 }
477 
478 /*
479  * amdgpu_mm_wreg_mmio_rlc -  write register either with mmio or with RLC path if in range
480  *
481  * this function is invoked only the debugfs register access
482  * */
483 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
484 			     uint32_t reg, uint32_t v)
485 {
486 	if (amdgpu_device_skip_hw_access(adev))
487 		return;
488 
489 	if (amdgpu_sriov_fullaccess(adev) &&
490 	    adev->gfx.rlc.funcs &&
491 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
492 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
493 			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
494 	} else {
495 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
496 	}
497 }
498 
499 /**
500  * amdgpu_mm_rdoorbell - read a doorbell dword
501  *
502  * @adev: amdgpu_device pointer
503  * @index: doorbell index
504  *
505  * Returns the value in the doorbell aperture at the
506  * requested doorbell index (CIK).
507  */
508 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
509 {
510 	if (amdgpu_device_skip_hw_access(adev))
511 		return 0;
512 
513 	if (index < adev->doorbell.num_doorbells) {
514 		return readl(adev->doorbell.ptr + index);
515 	} else {
516 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
517 		return 0;
518 	}
519 }
520 
521 /**
522  * amdgpu_mm_wdoorbell - write a doorbell dword
523  *
524  * @adev: amdgpu_device pointer
525  * @index: doorbell index
526  * @v: value to write
527  *
528  * Writes @v to the doorbell aperture at the
529  * requested doorbell index (CIK).
530  */
531 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
532 {
533 	if (amdgpu_device_skip_hw_access(adev))
534 		return;
535 
536 	if (index < adev->doorbell.num_doorbells) {
537 		writel(v, adev->doorbell.ptr + index);
538 	} else {
539 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
540 	}
541 }
542 
543 /**
544  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
545  *
546  * @adev: amdgpu_device pointer
547  * @index: doorbell index
548  *
549  * Returns the value in the doorbell aperture at the
550  * requested doorbell index (VEGA10+).
551  */
552 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
553 {
554 	if (amdgpu_device_skip_hw_access(adev))
555 		return 0;
556 
557 	if (index < adev->doorbell.num_doorbells) {
558 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
559 	} else {
560 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
561 		return 0;
562 	}
563 }
564 
565 /**
566  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
567  *
568  * @adev: amdgpu_device pointer
569  * @index: doorbell index
570  * @v: value to write
571  *
572  * Writes @v to the doorbell aperture at the
573  * requested doorbell index (VEGA10+).
574  */
575 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
576 {
577 	if (amdgpu_device_skip_hw_access(adev))
578 		return;
579 
580 	if (index < adev->doorbell.num_doorbells) {
581 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
582 	} else {
583 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
584 	}
585 }
586 
587 /**
588  * amdgpu_device_indirect_rreg - read an indirect register
589  *
590  * @adev: amdgpu_device pointer
591  * @pcie_index: mmio register offset
592  * @pcie_data: mmio register offset
593  * @reg_addr: indirect register address to read from
594  *
595  * Returns the value of indirect register @reg_addr
596  */
597 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
598 				u32 pcie_index, u32 pcie_data,
599 				u32 reg_addr)
600 {
601 	unsigned long flags;
602 	u32 r;
603 	void __iomem *pcie_index_offset;
604 	void __iomem *pcie_data_offset;
605 
606 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
607 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
608 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
609 
610 	writel(reg_addr, pcie_index_offset);
611 	readl(pcie_index_offset);
612 	r = readl(pcie_data_offset);
613 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
614 
615 	return r;
616 }
617 
618 /**
619  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
620  *
621  * @adev: amdgpu_device pointer
622  * @pcie_index: mmio register offset
623  * @pcie_data: mmio register offset
624  * @reg_addr: indirect register address to read from
625  *
626  * Returns the value of indirect register @reg_addr
627  */
628 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
629 				  u32 pcie_index, u32 pcie_data,
630 				  u32 reg_addr)
631 {
632 	unsigned long flags;
633 	u64 r;
634 	void __iomem *pcie_index_offset;
635 	void __iomem *pcie_data_offset;
636 
637 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
638 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
639 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
640 
641 	/* read low 32 bits */
642 	writel(reg_addr, pcie_index_offset);
643 	readl(pcie_index_offset);
644 	r = readl(pcie_data_offset);
645 	/* read high 32 bits */
646 	writel(reg_addr + 4, pcie_index_offset);
647 	readl(pcie_index_offset);
648 	r |= ((u64)readl(pcie_data_offset) << 32);
649 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
650 
651 	return r;
652 }
653 
654 /**
655  * amdgpu_device_indirect_wreg - write an indirect register address
656  *
657  * @adev: amdgpu_device pointer
658  * @pcie_index: mmio register offset
659  * @pcie_data: mmio register offset
660  * @reg_addr: indirect register offset
661  * @reg_data: indirect register data
662  *
663  */
664 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
665 				 u32 pcie_index, u32 pcie_data,
666 				 u32 reg_addr, u32 reg_data)
667 {
668 	unsigned long flags;
669 	void __iomem *pcie_index_offset;
670 	void __iomem *pcie_data_offset;
671 
672 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
673 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
674 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
675 
676 	writel(reg_addr, pcie_index_offset);
677 	readl(pcie_index_offset);
678 	writel(reg_data, pcie_data_offset);
679 	readl(pcie_data_offset);
680 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
681 }
682 
683 /**
684  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
685  *
686  * @adev: amdgpu_device pointer
687  * @pcie_index: mmio register offset
688  * @pcie_data: mmio register offset
689  * @reg_addr: indirect register offset
690  * @reg_data: indirect register data
691  *
692  */
693 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
694 				   u32 pcie_index, u32 pcie_data,
695 				   u32 reg_addr, u64 reg_data)
696 {
697 	unsigned long flags;
698 	void __iomem *pcie_index_offset;
699 	void __iomem *pcie_data_offset;
700 
701 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
702 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
703 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
704 
705 	/* write low 32 bits */
706 	writel(reg_addr, pcie_index_offset);
707 	readl(pcie_index_offset);
708 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
709 	readl(pcie_data_offset);
710 	/* write high 32 bits */
711 	writel(reg_addr + 4, pcie_index_offset);
712 	readl(pcie_index_offset);
713 	writel((u32)(reg_data >> 32), pcie_data_offset);
714 	readl(pcie_data_offset);
715 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
716 }
717 
718 /**
719  * amdgpu_invalid_rreg - dummy reg read function
720  *
721  * @adev: amdgpu_device pointer
722  * @reg: offset of register
723  *
724  * Dummy register read function.  Used for register blocks
725  * that certain asics don't have (all asics).
726  * Returns the value in the register.
727  */
728 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
729 {
730 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
731 	BUG();
732 	return 0;
733 }
734 
735 /**
736  * amdgpu_invalid_wreg - dummy reg write function
737  *
738  * @adev: amdgpu_device pointer
739  * @reg: offset of register
740  * @v: value to write to the register
741  *
742  * Dummy register read function.  Used for register blocks
743  * that certain asics don't have (all asics).
744  */
745 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
746 {
747 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
748 		  reg, v);
749 	BUG();
750 }
751 
752 /**
753  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
754  *
755  * @adev: amdgpu_device pointer
756  * @reg: offset of register
757  *
758  * Dummy register read function.  Used for register blocks
759  * that certain asics don't have (all asics).
760  * Returns the value in the register.
761  */
762 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
763 {
764 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
765 	BUG();
766 	return 0;
767 }
768 
769 /**
770  * amdgpu_invalid_wreg64 - dummy reg write function
771  *
772  * @adev: amdgpu_device pointer
773  * @reg: offset of register
774  * @v: value to write to the register
775  *
776  * Dummy register read function.  Used for register blocks
777  * that certain asics don't have (all asics).
778  */
779 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
780 {
781 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
782 		  reg, v);
783 	BUG();
784 }
785 
786 /**
787  * amdgpu_block_invalid_rreg - dummy reg read function
788  *
789  * @adev: amdgpu_device pointer
790  * @block: offset of instance
791  * @reg: offset of register
792  *
793  * Dummy register read function.  Used for register blocks
794  * that certain asics don't have (all asics).
795  * Returns the value in the register.
796  */
797 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
798 					  uint32_t block, uint32_t reg)
799 {
800 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
801 		  reg, block);
802 	BUG();
803 	return 0;
804 }
805 
806 /**
807  * amdgpu_block_invalid_wreg - dummy reg write function
808  *
809  * @adev: amdgpu_device pointer
810  * @block: offset of instance
811  * @reg: offset of register
812  * @v: value to write to the register
813  *
814  * Dummy register read function.  Used for register blocks
815  * that certain asics don't have (all asics).
816  */
817 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
818 				      uint32_t block,
819 				      uint32_t reg, uint32_t v)
820 {
821 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
822 		  reg, block, v);
823 	BUG();
824 }
825 
826 /**
827  * amdgpu_device_asic_init - Wrapper for atom asic_init
828  *
829  * @adev: amdgpu_device pointer
830  *
831  * Does any asic specific work and then calls atom asic init.
832  */
833 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
834 {
835 	amdgpu_asic_pre_asic_init(adev);
836 
837 	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
838 }
839 
840 /**
841  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
842  *
843  * @adev: amdgpu_device pointer
844  *
845  * Allocates a scratch page of VRAM for use by various things in the
846  * driver.
847  */
848 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
849 {
850 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
851 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
852 				       &adev->vram_scratch.robj,
853 				       &adev->vram_scratch.gpu_addr,
854 				       (void **)&adev->vram_scratch.ptr);
855 }
856 
857 /**
858  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
859  *
860  * @adev: amdgpu_device pointer
861  *
862  * Frees the VRAM scratch page.
863  */
864 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
865 {
866 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
867 }
868 
869 /**
870  * amdgpu_device_program_register_sequence - program an array of registers.
871  *
872  * @adev: amdgpu_device pointer
873  * @registers: pointer to the register array
874  * @array_size: size of the register array
875  *
876  * Programs an array or registers with and and or masks.
877  * This is a helper for setting golden registers.
878  */
879 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
880 					     const u32 *registers,
881 					     const u32 array_size)
882 {
883 	u32 tmp, reg, and_mask, or_mask;
884 	int i;
885 
886 	if (array_size % 3)
887 		return;
888 
889 	for (i = 0; i < array_size; i +=3) {
890 		reg = registers[i + 0];
891 		and_mask = registers[i + 1];
892 		or_mask = registers[i + 2];
893 
894 		if (and_mask == 0xffffffff) {
895 			tmp = or_mask;
896 		} else {
897 			tmp = RREG32(reg);
898 			tmp &= ~and_mask;
899 			if (adev->family >= AMDGPU_FAMILY_AI)
900 				tmp |= (or_mask & and_mask);
901 			else
902 				tmp |= or_mask;
903 		}
904 		WREG32(reg, tmp);
905 	}
906 }
907 
908 /**
909  * amdgpu_device_pci_config_reset - reset the GPU
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Resets the GPU using the pci config reset sequence.
914  * Only applicable to asics prior to vega10.
915  */
916 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
917 {
918 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
919 }
920 
921 /**
922  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
923  *
924  * @adev: amdgpu_device pointer
925  *
926  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
927  */
928 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
929 {
930 	return pci_reset_function(adev->pdev);
931 }
932 
933 /*
934  * GPU doorbell aperture helpers function.
935  */
936 /**
937  * amdgpu_device_doorbell_init - Init doorbell driver information.
938  *
939  * @adev: amdgpu_device pointer
940  *
941  * Init doorbell driver information (CIK)
942  * Returns 0 on success, error on failure.
943  */
944 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
945 {
946 
947 	/* No doorbell on SI hardware generation */
948 	if (adev->asic_type < CHIP_BONAIRE) {
949 		adev->doorbell.base = 0;
950 		adev->doorbell.size = 0;
951 		adev->doorbell.num_doorbells = 0;
952 		adev->doorbell.ptr = NULL;
953 		return 0;
954 	}
955 
956 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
957 		return -EINVAL;
958 
959 	amdgpu_asic_init_doorbell_index(adev);
960 
961 	/* doorbell bar mapping */
962 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
963 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
964 
965 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
966 					     adev->doorbell_index.max_assignment+1);
967 	if (adev->doorbell.num_doorbells == 0)
968 		return -EINVAL;
969 
970 	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
971 	 * paging queue doorbell use the second page. The
972 	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
973 	 * doorbells are in the first page. So with paging queue enabled,
974 	 * the max num_doorbells should + 1 page (0x400 in dword)
975 	 */
976 	if (adev->asic_type >= CHIP_VEGA10)
977 		adev->doorbell.num_doorbells += 0x400;
978 
979 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
980 				     adev->doorbell.num_doorbells *
981 				     sizeof(u32));
982 	if (adev->doorbell.ptr == NULL)
983 		return -ENOMEM;
984 
985 	return 0;
986 }
987 
988 /**
989  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
990  *
991  * @adev: amdgpu_device pointer
992  *
993  * Tear down doorbell driver information (CIK)
994  */
995 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
996 {
997 	iounmap(adev->doorbell.ptr);
998 	adev->doorbell.ptr = NULL;
999 }
1000 
1001 
1002 
1003 /*
1004  * amdgpu_device_wb_*()
1005  * Writeback is the method by which the GPU updates special pages in memory
1006  * with the status of certain GPU events (fences, ring pointers,etc.).
1007  */
1008 
1009 /**
1010  * amdgpu_device_wb_fini - Disable Writeback and free memory
1011  *
1012  * @adev: amdgpu_device pointer
1013  *
1014  * Disables Writeback and frees the Writeback memory (all asics).
1015  * Used at driver shutdown.
1016  */
1017 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1018 {
1019 	if (adev->wb.wb_obj) {
1020 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1021 				      &adev->wb.gpu_addr,
1022 				      (void **)&adev->wb.wb);
1023 		adev->wb.wb_obj = NULL;
1024 	}
1025 }
1026 
1027 /**
1028  * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1029  *
1030  * @adev: amdgpu_device pointer
1031  *
1032  * Initializes writeback and allocates writeback memory (all asics).
1033  * Used at driver startup.
1034  * Returns 0 on success or an -error on failure.
1035  */
1036 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1037 {
1038 	int r;
1039 
1040 	if (adev->wb.wb_obj == NULL) {
1041 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1042 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1043 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1044 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1045 					    (void **)&adev->wb.wb);
1046 		if (r) {
1047 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1048 			return r;
1049 		}
1050 
1051 		adev->wb.num_wb = AMDGPU_MAX_WB;
1052 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1053 
1054 		/* clear wb memory */
1055 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 /**
1062  * amdgpu_device_wb_get - Allocate a wb entry
1063  *
1064  * @adev: amdgpu_device pointer
1065  * @wb: wb index
1066  *
1067  * Allocate a wb slot for use by the driver (all asics).
1068  * Returns 0 on success or -EINVAL on failure.
1069  */
1070 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1071 {
1072 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1073 
1074 	if (offset < adev->wb.num_wb) {
1075 		__set_bit(offset, adev->wb.used);
1076 		*wb = offset << 3; /* convert to dw offset */
1077 		return 0;
1078 	} else {
1079 		return -EINVAL;
1080 	}
1081 }
1082 
1083 /**
1084  * amdgpu_device_wb_free - Free a wb entry
1085  *
1086  * @adev: amdgpu_device pointer
1087  * @wb: wb index
1088  *
1089  * Free a wb slot allocated for use by the driver (all asics)
1090  */
1091 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1092 {
1093 	wb >>= 3;
1094 	if (wb < adev->wb.num_wb)
1095 		__clear_bit(wb, adev->wb.used);
1096 }
1097 
1098 /**
1099  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1100  *
1101  * @adev: amdgpu_device pointer
1102  *
1103  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1104  * to fail, but if any of the BARs is not accessible after the size we abort
1105  * driver loading by returning -ENODEV.
1106  */
1107 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1108 {
1109 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1110 	struct pci_bus *root;
1111 	struct resource *res;
1112 	unsigned i;
1113 	u16 cmd;
1114 	int r;
1115 
1116 	/* Bypass for VF */
1117 	if (amdgpu_sriov_vf(adev))
1118 		return 0;
1119 
1120 	/* skip if the bios has already enabled large BAR */
1121 	if (adev->gmc.real_vram_size &&
1122 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1123 		return 0;
1124 
1125 	/* Check if the root BUS has 64bit memory resources */
1126 	root = adev->pdev->bus;
1127 	while (root->parent)
1128 		root = root->parent;
1129 
1130 	pci_bus_for_each_resource(root, res, i) {
1131 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1132 		    res->start > 0x100000000ull)
1133 			break;
1134 	}
1135 
1136 	/* Trying to resize is pointless without a root hub window above 4GB */
1137 	if (!res)
1138 		return 0;
1139 
1140 	/* Limit the BAR size to what is available */
1141 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1142 			rbar_size);
1143 
1144 	/* Disable memory decoding while we change the BAR addresses and size */
1145 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1146 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1147 			      cmd & ~PCI_COMMAND_MEMORY);
1148 
1149 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1150 	amdgpu_device_doorbell_fini(adev);
1151 	if (adev->asic_type >= CHIP_BONAIRE)
1152 		pci_release_resource(adev->pdev, 2);
1153 
1154 	pci_release_resource(adev->pdev, 0);
1155 
1156 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1157 	if (r == -ENOSPC)
1158 		DRM_INFO("Not enough PCI address space for a large BAR.");
1159 	else if (r && r != -ENOTSUPP)
1160 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1161 
1162 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1163 
1164 	/* When the doorbell or fb BAR isn't available we have no chance of
1165 	 * using the device.
1166 	 */
1167 	r = amdgpu_device_doorbell_init(adev);
1168 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1169 		return -ENODEV;
1170 
1171 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1172 
1173 	return 0;
1174 }
1175 
1176 /*
1177  * GPU helpers function.
1178  */
1179 /**
1180  * amdgpu_device_need_post - check if the hw need post or not
1181  *
1182  * @adev: amdgpu_device pointer
1183  *
1184  * Check if the asic has been initialized (all asics) at driver startup
1185  * or post is needed if  hw reset is performed.
1186  * Returns true if need or false if not.
1187  */
1188 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1189 {
1190 	uint32_t reg;
1191 
1192 	if (amdgpu_sriov_vf(adev))
1193 		return false;
1194 
1195 	if (amdgpu_passthrough(adev)) {
1196 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1197 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
1198 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
1199 		 * vpost executed for smc version below 22.15
1200 		 */
1201 		if (adev->asic_type == CHIP_FIJI) {
1202 			int err;
1203 			uint32_t fw_ver;
1204 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1205 			/* force vPost if error occured */
1206 			if (err)
1207 				return true;
1208 
1209 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1210 			if (fw_ver < 0x00160e00)
1211 				return true;
1212 		}
1213 	}
1214 
1215 	/* Don't post if we need to reset whole hive on init */
1216 	if (adev->gmc.xgmi.pending_reset)
1217 		return false;
1218 
1219 	if (adev->has_hw_reset) {
1220 		adev->has_hw_reset = false;
1221 		return true;
1222 	}
1223 
1224 	/* bios scratch used on CIK+ */
1225 	if (adev->asic_type >= CHIP_BONAIRE)
1226 		return amdgpu_atombios_scratch_need_asic_init(adev);
1227 
1228 	/* check MEM_SIZE for older asics */
1229 	reg = amdgpu_asic_get_config_memsize(adev);
1230 
1231 	if ((reg != 0) && (reg != 0xffffffff))
1232 		return false;
1233 
1234 	return true;
1235 }
1236 
1237 /* if we get transitioned to only one device, take VGA back */
1238 /**
1239  * amdgpu_device_vga_set_decode - enable/disable vga decode
1240  *
1241  * @cookie: amdgpu_device pointer
1242  * @state: enable/disable vga decode
1243  *
1244  * Enable/disable vga decode (all asics).
1245  * Returns VGA resource flags.
1246  */
1247 static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
1248 {
1249 	struct amdgpu_device *adev = cookie;
1250 	amdgpu_asic_set_vga_state(adev, state);
1251 	if (state)
1252 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1253 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1254 	else
1255 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1256 }
1257 
1258 /**
1259  * amdgpu_device_check_block_size - validate the vm block size
1260  *
1261  * @adev: amdgpu_device pointer
1262  *
1263  * Validates the vm block size specified via module parameter.
1264  * The vm block size defines number of bits in page table versus page directory,
1265  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1266  * page table and the remaining bits are in the page directory.
1267  */
1268 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1269 {
1270 	/* defines number of bits in page table versus page directory,
1271 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1272 	 * page table and the remaining bits are in the page directory */
1273 	if (amdgpu_vm_block_size == -1)
1274 		return;
1275 
1276 	if (amdgpu_vm_block_size < 9) {
1277 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1278 			 amdgpu_vm_block_size);
1279 		amdgpu_vm_block_size = -1;
1280 	}
1281 }
1282 
1283 /**
1284  * amdgpu_device_check_vm_size - validate the vm size
1285  *
1286  * @adev: amdgpu_device pointer
1287  *
1288  * Validates the vm size in GB specified via module parameter.
1289  * The VM size is the size of the GPU virtual memory space in GB.
1290  */
1291 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1292 {
1293 	/* no need to check the default value */
1294 	if (amdgpu_vm_size == -1)
1295 		return;
1296 
1297 	if (amdgpu_vm_size < 1) {
1298 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1299 			 amdgpu_vm_size);
1300 		amdgpu_vm_size = -1;
1301 	}
1302 }
1303 
1304 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1305 {
1306 	struct sysinfo si;
1307 	bool is_os_64 = (sizeof(void *) == 8);
1308 	uint64_t total_memory;
1309 	uint64_t dram_size_seven_GB = 0x1B8000000;
1310 	uint64_t dram_size_three_GB = 0xB8000000;
1311 
1312 	if (amdgpu_smu_memory_pool_size == 0)
1313 		return;
1314 
1315 	if (!is_os_64) {
1316 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1317 		goto def_value;
1318 	}
1319 	si_meminfo(&si);
1320 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1321 
1322 	if ((amdgpu_smu_memory_pool_size == 1) ||
1323 		(amdgpu_smu_memory_pool_size == 2)) {
1324 		if (total_memory < dram_size_three_GB)
1325 			goto def_value1;
1326 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1327 		(amdgpu_smu_memory_pool_size == 8)) {
1328 		if (total_memory < dram_size_seven_GB)
1329 			goto def_value1;
1330 	} else {
1331 		DRM_WARN("Smu memory pool size not supported\n");
1332 		goto def_value;
1333 	}
1334 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1335 
1336 	return;
1337 
1338 def_value1:
1339 	DRM_WARN("No enough system memory\n");
1340 def_value:
1341 	adev->pm.smu_prv_buffer_size = 0;
1342 }
1343 
1344 /**
1345  * amdgpu_device_check_arguments - validate module params
1346  *
1347  * @adev: amdgpu_device pointer
1348  *
1349  * Validates certain module parameters and updates
1350  * the associated values used by the driver (all asics).
1351  */
1352 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1353 {
1354 	if (amdgpu_sched_jobs < 4) {
1355 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1356 			 amdgpu_sched_jobs);
1357 		amdgpu_sched_jobs = 4;
1358 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1359 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1360 			 amdgpu_sched_jobs);
1361 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1362 	}
1363 
1364 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1365 		/* gart size must be greater or equal to 32M */
1366 		dev_warn(adev->dev, "gart size (%d) too small\n",
1367 			 amdgpu_gart_size);
1368 		amdgpu_gart_size = -1;
1369 	}
1370 
1371 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1372 		/* gtt size must be greater or equal to 32M */
1373 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1374 				 amdgpu_gtt_size);
1375 		amdgpu_gtt_size = -1;
1376 	}
1377 
1378 	/* valid range is between 4 and 9 inclusive */
1379 	if (amdgpu_vm_fragment_size != -1 &&
1380 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1381 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1382 		amdgpu_vm_fragment_size = -1;
1383 	}
1384 
1385 	if (amdgpu_sched_hw_submission < 2) {
1386 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1387 			 amdgpu_sched_hw_submission);
1388 		amdgpu_sched_hw_submission = 2;
1389 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1390 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1391 			 amdgpu_sched_hw_submission);
1392 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1393 	}
1394 
1395 	amdgpu_device_check_smu_prv_buffer_size(adev);
1396 
1397 	amdgpu_device_check_vm_size(adev);
1398 
1399 	amdgpu_device_check_block_size(adev);
1400 
1401 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1402 
1403 	amdgpu_gmc_tmz_set(adev);
1404 
1405 	amdgpu_gmc_noretry_set(adev);
1406 
1407 	return 0;
1408 }
1409 
1410 /**
1411  * amdgpu_switcheroo_set_state - set switcheroo state
1412  *
1413  * @pdev: pci dev pointer
1414  * @state: vga_switcheroo state
1415  *
1416  * Callback for the switcheroo driver.  Suspends or resumes the
1417  * the asics before or after it is powered up using ACPI methods.
1418  */
1419 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1420 					enum vga_switcheroo_state state)
1421 {
1422 	struct drm_device *dev = pci_get_drvdata(pdev);
1423 	int r;
1424 
1425 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1426 		return;
1427 
1428 	if (state == VGA_SWITCHEROO_ON) {
1429 		pr_info("switched on\n");
1430 		/* don't suspend or resume card normally */
1431 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1432 
1433 		pci_set_power_state(pdev, PCI_D0);
1434 		amdgpu_device_load_pci_state(pdev);
1435 		r = pci_enable_device(pdev);
1436 		if (r)
1437 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1438 		amdgpu_device_resume(dev, true);
1439 
1440 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1441 	} else {
1442 		pr_info("switched off\n");
1443 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1444 		amdgpu_device_suspend(dev, true);
1445 		amdgpu_device_cache_pci_state(pdev);
1446 		/* Shut down the device */
1447 		pci_disable_device(pdev);
1448 		pci_set_power_state(pdev, PCI_D3cold);
1449 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1450 	}
1451 }
1452 
1453 /**
1454  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1455  *
1456  * @pdev: pci dev pointer
1457  *
1458  * Callback for the switcheroo driver.  Check of the switcheroo
1459  * state can be changed.
1460  * Returns true if the state can be changed, false if not.
1461  */
1462 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1463 {
1464 	struct drm_device *dev = pci_get_drvdata(pdev);
1465 
1466 	/*
1467 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1468 	* locking inversion with the driver load path. And the access here is
1469 	* completely racy anyway. So don't bother with locking for now.
1470 	*/
1471 	return atomic_read(&dev->open_count) == 0;
1472 }
1473 
1474 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1475 	.set_gpu_state = amdgpu_switcheroo_set_state,
1476 	.reprobe = NULL,
1477 	.can_switch = amdgpu_switcheroo_can_switch,
1478 };
1479 
1480 /**
1481  * amdgpu_device_ip_set_clockgating_state - set the CG state
1482  *
1483  * @dev: amdgpu_device pointer
1484  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1485  * @state: clockgating state (gate or ungate)
1486  *
1487  * Sets the requested clockgating state for all instances of
1488  * the hardware IP specified.
1489  * Returns the error code from the last instance.
1490  */
1491 int amdgpu_device_ip_set_clockgating_state(void *dev,
1492 					   enum amd_ip_block_type block_type,
1493 					   enum amd_clockgating_state state)
1494 {
1495 	struct amdgpu_device *adev = dev;
1496 	int i, r = 0;
1497 
1498 	for (i = 0; i < adev->num_ip_blocks; i++) {
1499 		if (!adev->ip_blocks[i].status.valid)
1500 			continue;
1501 		if (adev->ip_blocks[i].version->type != block_type)
1502 			continue;
1503 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1504 			continue;
1505 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1506 			(void *)adev, state);
1507 		if (r)
1508 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1509 				  adev->ip_blocks[i].version->funcs->name, r);
1510 	}
1511 	return r;
1512 }
1513 
1514 /**
1515  * amdgpu_device_ip_set_powergating_state - set the PG state
1516  *
1517  * @dev: amdgpu_device pointer
1518  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1519  * @state: powergating state (gate or ungate)
1520  *
1521  * Sets the requested powergating state for all instances of
1522  * the hardware IP specified.
1523  * Returns the error code from the last instance.
1524  */
1525 int amdgpu_device_ip_set_powergating_state(void *dev,
1526 					   enum amd_ip_block_type block_type,
1527 					   enum amd_powergating_state state)
1528 {
1529 	struct amdgpu_device *adev = dev;
1530 	int i, r = 0;
1531 
1532 	for (i = 0; i < adev->num_ip_blocks; i++) {
1533 		if (!adev->ip_blocks[i].status.valid)
1534 			continue;
1535 		if (adev->ip_blocks[i].version->type != block_type)
1536 			continue;
1537 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1538 			continue;
1539 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1540 			(void *)adev, state);
1541 		if (r)
1542 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1543 				  adev->ip_blocks[i].version->funcs->name, r);
1544 	}
1545 	return r;
1546 }
1547 
1548 /**
1549  * amdgpu_device_ip_get_clockgating_state - get the CG state
1550  *
1551  * @adev: amdgpu_device pointer
1552  * @flags: clockgating feature flags
1553  *
1554  * Walks the list of IPs on the device and updates the clockgating
1555  * flags for each IP.
1556  * Updates @flags with the feature flags for each hardware IP where
1557  * clockgating is enabled.
1558  */
1559 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1560 					    u32 *flags)
1561 {
1562 	int i;
1563 
1564 	for (i = 0; i < adev->num_ip_blocks; i++) {
1565 		if (!adev->ip_blocks[i].status.valid)
1566 			continue;
1567 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1568 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1569 	}
1570 }
1571 
1572 /**
1573  * amdgpu_device_ip_wait_for_idle - wait for idle
1574  *
1575  * @adev: amdgpu_device pointer
1576  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1577  *
1578  * Waits for the request hardware IP to be idle.
1579  * Returns 0 for success or a negative error code on failure.
1580  */
1581 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1582 				   enum amd_ip_block_type block_type)
1583 {
1584 	int i, r;
1585 
1586 	for (i = 0; i < adev->num_ip_blocks; i++) {
1587 		if (!adev->ip_blocks[i].status.valid)
1588 			continue;
1589 		if (adev->ip_blocks[i].version->type == block_type) {
1590 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1591 			if (r)
1592 				return r;
1593 			break;
1594 		}
1595 	}
1596 	return 0;
1597 
1598 }
1599 
1600 /**
1601  * amdgpu_device_ip_is_idle - is the hardware IP idle
1602  *
1603  * @adev: amdgpu_device pointer
1604  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1605  *
1606  * Check if the hardware IP is idle or not.
1607  * Returns true if it the IP is idle, false if not.
1608  */
1609 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1610 			      enum amd_ip_block_type block_type)
1611 {
1612 	int i;
1613 
1614 	for (i = 0; i < adev->num_ip_blocks; i++) {
1615 		if (!adev->ip_blocks[i].status.valid)
1616 			continue;
1617 		if (adev->ip_blocks[i].version->type == block_type)
1618 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1619 	}
1620 	return true;
1621 
1622 }
1623 
1624 /**
1625  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1626  *
1627  * @adev: amdgpu_device pointer
1628  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1629  *
1630  * Returns a pointer to the hardware IP block structure
1631  * if it exists for the asic, otherwise NULL.
1632  */
1633 struct amdgpu_ip_block *
1634 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1635 			      enum amd_ip_block_type type)
1636 {
1637 	int i;
1638 
1639 	for (i = 0; i < adev->num_ip_blocks; i++)
1640 		if (adev->ip_blocks[i].version->type == type)
1641 			return &adev->ip_blocks[i];
1642 
1643 	return NULL;
1644 }
1645 
1646 /**
1647  * amdgpu_device_ip_block_version_cmp
1648  *
1649  * @adev: amdgpu_device pointer
1650  * @type: enum amd_ip_block_type
1651  * @major: major version
1652  * @minor: minor version
1653  *
1654  * return 0 if equal or greater
1655  * return 1 if smaller or the ip_block doesn't exist
1656  */
1657 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1658 				       enum amd_ip_block_type type,
1659 				       u32 major, u32 minor)
1660 {
1661 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1662 
1663 	if (ip_block && ((ip_block->version->major > major) ||
1664 			((ip_block->version->major == major) &&
1665 			(ip_block->version->minor >= minor))))
1666 		return 0;
1667 
1668 	return 1;
1669 }
1670 
1671 /**
1672  * amdgpu_device_ip_block_add
1673  *
1674  * @adev: amdgpu_device pointer
1675  * @ip_block_version: pointer to the IP to add
1676  *
1677  * Adds the IP block driver information to the collection of IPs
1678  * on the asic.
1679  */
1680 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1681 			       const struct amdgpu_ip_block_version *ip_block_version)
1682 {
1683 	if (!ip_block_version)
1684 		return -EINVAL;
1685 
1686 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1687 		  ip_block_version->funcs->name);
1688 
1689 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1690 
1691 	return 0;
1692 }
1693 
1694 /**
1695  * amdgpu_device_enable_virtual_display - enable virtual display feature
1696  *
1697  * @adev: amdgpu_device pointer
1698  *
1699  * Enabled the virtual display feature if the user has enabled it via
1700  * the module parameter virtual_display.  This feature provides a virtual
1701  * display hardware on headless boards or in virtualized environments.
1702  * This function parses and validates the configuration string specified by
1703  * the user and configues the virtual display configuration (number of
1704  * virtual connectors, crtcs, etc.) specified.
1705  */
1706 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1707 {
1708 	adev->enable_virtual_display = false;
1709 
1710 	if (amdgpu_virtual_display) {
1711 		const char *pci_address_name = pci_name(adev->pdev);
1712 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1713 
1714 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1715 		pciaddstr_tmp = pciaddstr;
1716 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1717 			pciaddname = strsep(&pciaddname_tmp, ",");
1718 			if (!strcmp("all", pciaddname)
1719 			    || !strcmp(pci_address_name, pciaddname)) {
1720 				long num_crtc;
1721 				int res = -1;
1722 
1723 				adev->enable_virtual_display = true;
1724 
1725 				if (pciaddname_tmp)
1726 					res = kstrtol(pciaddname_tmp, 10,
1727 						      &num_crtc);
1728 
1729 				if (!res) {
1730 					if (num_crtc < 1)
1731 						num_crtc = 1;
1732 					if (num_crtc > 6)
1733 						num_crtc = 6;
1734 					adev->mode_info.num_crtc = num_crtc;
1735 				} else {
1736 					adev->mode_info.num_crtc = 1;
1737 				}
1738 				break;
1739 			}
1740 		}
1741 
1742 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1743 			 amdgpu_virtual_display, pci_address_name,
1744 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1745 
1746 		kfree(pciaddstr);
1747 	}
1748 }
1749 
1750 /**
1751  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1752  *
1753  * @adev: amdgpu_device pointer
1754  *
1755  * Parses the asic configuration parameters specified in the gpu info
1756  * firmware and makes them availale to the driver for use in configuring
1757  * the asic.
1758  * Returns 0 on success, -EINVAL on failure.
1759  */
1760 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1761 {
1762 	const char *chip_name;
1763 	char fw_name[40];
1764 	int err;
1765 	const struct gpu_info_firmware_header_v1_0 *hdr;
1766 
1767 	adev->firmware.gpu_info_fw = NULL;
1768 
1769 	if (adev->mman.discovery_bin) {
1770 		amdgpu_discovery_get_gfx_info(adev);
1771 
1772 		/*
1773 		 * FIXME: The bounding box is still needed by Navi12, so
1774 		 * temporarily read it from gpu_info firmware. Should be droped
1775 		 * when DAL no longer needs it.
1776 		 */
1777 		if (adev->asic_type != CHIP_NAVI12)
1778 			return 0;
1779 	}
1780 
1781 	switch (adev->asic_type) {
1782 #ifdef CONFIG_DRM_AMDGPU_SI
1783 	case CHIP_VERDE:
1784 	case CHIP_TAHITI:
1785 	case CHIP_PITCAIRN:
1786 	case CHIP_OLAND:
1787 	case CHIP_HAINAN:
1788 #endif
1789 #ifdef CONFIG_DRM_AMDGPU_CIK
1790 	case CHIP_BONAIRE:
1791 	case CHIP_HAWAII:
1792 	case CHIP_KAVERI:
1793 	case CHIP_KABINI:
1794 	case CHIP_MULLINS:
1795 #endif
1796 	case CHIP_TOPAZ:
1797 	case CHIP_TONGA:
1798 	case CHIP_FIJI:
1799 	case CHIP_POLARIS10:
1800 	case CHIP_POLARIS11:
1801 	case CHIP_POLARIS12:
1802 	case CHIP_VEGAM:
1803 	case CHIP_CARRIZO:
1804 	case CHIP_STONEY:
1805 	case CHIP_VEGA20:
1806 	case CHIP_ALDEBARAN:
1807 	case CHIP_SIENNA_CICHLID:
1808 	case CHIP_NAVY_FLOUNDER:
1809 	case CHIP_DIMGREY_CAVEFISH:
1810 	default:
1811 		return 0;
1812 	case CHIP_VEGA10:
1813 		chip_name = "vega10";
1814 		break;
1815 	case CHIP_VEGA12:
1816 		chip_name = "vega12";
1817 		break;
1818 	case CHIP_RAVEN:
1819 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1820 			chip_name = "raven2";
1821 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1822 			chip_name = "picasso";
1823 		else
1824 			chip_name = "raven";
1825 		break;
1826 	case CHIP_ARCTURUS:
1827 		chip_name = "arcturus";
1828 		break;
1829 	case CHIP_RENOIR:
1830 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1831 			chip_name = "renoir";
1832 		else
1833 			chip_name = "green_sardine";
1834 		break;
1835 	case CHIP_NAVI10:
1836 		chip_name = "navi10";
1837 		break;
1838 	case CHIP_NAVI14:
1839 		chip_name = "navi14";
1840 		break;
1841 	case CHIP_NAVI12:
1842 		chip_name = "navi12";
1843 		break;
1844 	case CHIP_VANGOGH:
1845 		chip_name = "vangogh";
1846 		break;
1847 	}
1848 
1849 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1850 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1851 	if (err) {
1852 		dev_err(adev->dev,
1853 			"Failed to load gpu_info firmware \"%s\"\n",
1854 			fw_name);
1855 		goto out;
1856 	}
1857 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1858 	if (err) {
1859 		dev_err(adev->dev,
1860 			"Failed to validate gpu_info firmware \"%s\"\n",
1861 			fw_name);
1862 		goto out;
1863 	}
1864 
1865 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1866 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1867 
1868 	switch (hdr->version_major) {
1869 	case 1:
1870 	{
1871 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1872 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1873 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1874 
1875 		/*
1876 		 * Should be droped when DAL no longer needs it.
1877 		 */
1878 		if (adev->asic_type == CHIP_NAVI12)
1879 			goto parse_soc_bounding_box;
1880 
1881 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1882 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1883 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1884 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1885 		adev->gfx.config.max_texture_channel_caches =
1886 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1887 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1888 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1889 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1890 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1891 		adev->gfx.config.double_offchip_lds_buf =
1892 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1893 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1894 		adev->gfx.cu_info.max_waves_per_simd =
1895 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1896 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1897 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1898 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1899 		if (hdr->version_minor >= 1) {
1900 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1901 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1902 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1903 			adev->gfx.config.num_sc_per_sh =
1904 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1905 			adev->gfx.config.num_packer_per_sc =
1906 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1907 		}
1908 
1909 parse_soc_bounding_box:
1910 		/*
1911 		 * soc bounding box info is not integrated in disocovery table,
1912 		 * we always need to parse it from gpu info firmware if needed.
1913 		 */
1914 		if (hdr->version_minor == 2) {
1915 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1916 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1917 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1918 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1919 		}
1920 		break;
1921 	}
1922 	default:
1923 		dev_err(adev->dev,
1924 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1925 		err = -EINVAL;
1926 		goto out;
1927 	}
1928 out:
1929 	return err;
1930 }
1931 
1932 /**
1933  * amdgpu_device_ip_early_init - run early init for hardware IPs
1934  *
1935  * @adev: amdgpu_device pointer
1936  *
1937  * Early initialization pass for hardware IPs.  The hardware IPs that make
1938  * up each asic are discovered each IP's early_init callback is run.  This
1939  * is the first stage in initializing the asic.
1940  * Returns 0 on success, negative error code on failure.
1941  */
1942 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1943 {
1944 	int i, r;
1945 
1946 	amdgpu_device_enable_virtual_display(adev);
1947 
1948 	if (amdgpu_sriov_vf(adev)) {
1949 		r = amdgpu_virt_request_full_gpu(adev, true);
1950 		if (r)
1951 			return r;
1952 	}
1953 
1954 	switch (adev->asic_type) {
1955 #ifdef CONFIG_DRM_AMDGPU_SI
1956 	case CHIP_VERDE:
1957 	case CHIP_TAHITI:
1958 	case CHIP_PITCAIRN:
1959 	case CHIP_OLAND:
1960 	case CHIP_HAINAN:
1961 		adev->family = AMDGPU_FAMILY_SI;
1962 		r = si_set_ip_blocks(adev);
1963 		if (r)
1964 			return r;
1965 		break;
1966 #endif
1967 #ifdef CONFIG_DRM_AMDGPU_CIK
1968 	case CHIP_BONAIRE:
1969 	case CHIP_HAWAII:
1970 	case CHIP_KAVERI:
1971 	case CHIP_KABINI:
1972 	case CHIP_MULLINS:
1973 		if (adev->flags & AMD_IS_APU)
1974 			adev->family = AMDGPU_FAMILY_KV;
1975 		else
1976 			adev->family = AMDGPU_FAMILY_CI;
1977 
1978 		r = cik_set_ip_blocks(adev);
1979 		if (r)
1980 			return r;
1981 		break;
1982 #endif
1983 	case CHIP_TOPAZ:
1984 	case CHIP_TONGA:
1985 	case CHIP_FIJI:
1986 	case CHIP_POLARIS10:
1987 	case CHIP_POLARIS11:
1988 	case CHIP_POLARIS12:
1989 	case CHIP_VEGAM:
1990 	case CHIP_CARRIZO:
1991 	case CHIP_STONEY:
1992 		if (adev->flags & AMD_IS_APU)
1993 			adev->family = AMDGPU_FAMILY_CZ;
1994 		else
1995 			adev->family = AMDGPU_FAMILY_VI;
1996 
1997 		r = vi_set_ip_blocks(adev);
1998 		if (r)
1999 			return r;
2000 		break;
2001 	case CHIP_VEGA10:
2002 	case CHIP_VEGA12:
2003 	case CHIP_VEGA20:
2004 	case CHIP_RAVEN:
2005 	case CHIP_ARCTURUS:
2006 	case CHIP_RENOIR:
2007 	case CHIP_ALDEBARAN:
2008 		if (adev->flags & AMD_IS_APU)
2009 			adev->family = AMDGPU_FAMILY_RV;
2010 		else
2011 			adev->family = AMDGPU_FAMILY_AI;
2012 
2013 		r = soc15_set_ip_blocks(adev);
2014 		if (r)
2015 			return r;
2016 		break;
2017 	case  CHIP_NAVI10:
2018 	case  CHIP_NAVI14:
2019 	case  CHIP_NAVI12:
2020 	case  CHIP_SIENNA_CICHLID:
2021 	case  CHIP_NAVY_FLOUNDER:
2022 	case  CHIP_DIMGREY_CAVEFISH:
2023 	case CHIP_VANGOGH:
2024 		if (adev->asic_type == CHIP_VANGOGH)
2025 			adev->family = AMDGPU_FAMILY_VGH;
2026 		else
2027 			adev->family = AMDGPU_FAMILY_NV;
2028 
2029 		r = nv_set_ip_blocks(adev);
2030 		if (r)
2031 			return r;
2032 		break;
2033 	default:
2034 		/* FIXME: not supported yet */
2035 		return -EINVAL;
2036 	}
2037 
2038 	amdgpu_amdkfd_device_probe(adev);
2039 
2040 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2041 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2042 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2043 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2044 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2045 
2046 	for (i = 0; i < adev->num_ip_blocks; i++) {
2047 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2048 			DRM_ERROR("disabled ip block: %d <%s>\n",
2049 				  i, adev->ip_blocks[i].version->funcs->name);
2050 			adev->ip_blocks[i].status.valid = false;
2051 		} else {
2052 			if (adev->ip_blocks[i].version->funcs->early_init) {
2053 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2054 				if (r == -ENOENT) {
2055 					adev->ip_blocks[i].status.valid = false;
2056 				} else if (r) {
2057 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2058 						  adev->ip_blocks[i].version->funcs->name, r);
2059 					return r;
2060 				} else {
2061 					adev->ip_blocks[i].status.valid = true;
2062 				}
2063 			} else {
2064 				adev->ip_blocks[i].status.valid = true;
2065 			}
2066 		}
2067 		/* get the vbios after the asic_funcs are set up */
2068 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2069 			r = amdgpu_device_parse_gpu_info_fw(adev);
2070 			if (r)
2071 				return r;
2072 
2073 			/* Read BIOS */
2074 			if (!amdgpu_get_bios(adev))
2075 				return -EINVAL;
2076 
2077 			r = amdgpu_atombios_init(adev);
2078 			if (r) {
2079 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2080 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2081 				return r;
2082 			}
2083 
2084 			/*get pf2vf msg info at it's earliest time*/
2085 			if (amdgpu_sriov_vf(adev))
2086 				amdgpu_virt_init_data_exchange(adev);
2087 
2088 		}
2089 	}
2090 
2091 	adev->cg_flags &= amdgpu_cg_mask;
2092 	adev->pg_flags &= amdgpu_pg_mask;
2093 
2094 	return 0;
2095 }
2096 
2097 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2098 {
2099 	int i, r;
2100 
2101 	for (i = 0; i < adev->num_ip_blocks; i++) {
2102 		if (!adev->ip_blocks[i].status.sw)
2103 			continue;
2104 		if (adev->ip_blocks[i].status.hw)
2105 			continue;
2106 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2107 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2108 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2109 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2110 			if (r) {
2111 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2112 					  adev->ip_blocks[i].version->funcs->name, r);
2113 				return r;
2114 			}
2115 			adev->ip_blocks[i].status.hw = true;
2116 		}
2117 	}
2118 
2119 	return 0;
2120 }
2121 
2122 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2123 {
2124 	int i, r;
2125 
2126 	for (i = 0; i < adev->num_ip_blocks; i++) {
2127 		if (!adev->ip_blocks[i].status.sw)
2128 			continue;
2129 		if (adev->ip_blocks[i].status.hw)
2130 			continue;
2131 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2132 		if (r) {
2133 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2134 				  adev->ip_blocks[i].version->funcs->name, r);
2135 			return r;
2136 		}
2137 		adev->ip_blocks[i].status.hw = true;
2138 	}
2139 
2140 	return 0;
2141 }
2142 
2143 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2144 {
2145 	int r = 0;
2146 	int i;
2147 	uint32_t smu_version;
2148 
2149 	if (adev->asic_type >= CHIP_VEGA10) {
2150 		for (i = 0; i < adev->num_ip_blocks; i++) {
2151 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2152 				continue;
2153 
2154 			if (!adev->ip_blocks[i].status.sw)
2155 				continue;
2156 
2157 			/* no need to do the fw loading again if already done*/
2158 			if (adev->ip_blocks[i].status.hw == true)
2159 				break;
2160 
2161 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2162 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2163 				if (r) {
2164 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2165 							  adev->ip_blocks[i].version->funcs->name, r);
2166 					return r;
2167 				}
2168 			} else {
2169 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2170 				if (r) {
2171 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2172 							  adev->ip_blocks[i].version->funcs->name, r);
2173 					return r;
2174 				}
2175 			}
2176 
2177 			adev->ip_blocks[i].status.hw = true;
2178 			break;
2179 		}
2180 	}
2181 
2182 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2183 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2184 
2185 	return r;
2186 }
2187 
2188 /**
2189  * amdgpu_device_ip_init - run init for hardware IPs
2190  *
2191  * @adev: amdgpu_device pointer
2192  *
2193  * Main initialization pass for hardware IPs.  The list of all the hardware
2194  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2195  * are run.  sw_init initializes the software state associated with each IP
2196  * and hw_init initializes the hardware associated with each IP.
2197  * Returns 0 on success, negative error code on failure.
2198  */
2199 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2200 {
2201 	int i, r;
2202 
2203 	r = amdgpu_ras_init(adev);
2204 	if (r)
2205 		return r;
2206 
2207 	for (i = 0; i < adev->num_ip_blocks; i++) {
2208 		if (!adev->ip_blocks[i].status.valid)
2209 			continue;
2210 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2211 		if (r) {
2212 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2213 				  adev->ip_blocks[i].version->funcs->name, r);
2214 			goto init_failed;
2215 		}
2216 		adev->ip_blocks[i].status.sw = true;
2217 
2218 		/* need to do gmc hw init early so we can allocate gpu mem */
2219 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2220 			r = amdgpu_device_vram_scratch_init(adev);
2221 			if (r) {
2222 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2223 				goto init_failed;
2224 			}
2225 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2226 			if (r) {
2227 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2228 				goto init_failed;
2229 			}
2230 			r = amdgpu_device_wb_init(adev);
2231 			if (r) {
2232 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2233 				goto init_failed;
2234 			}
2235 			adev->ip_blocks[i].status.hw = true;
2236 
2237 			/* right after GMC hw init, we create CSA */
2238 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2239 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2240 								AMDGPU_GEM_DOMAIN_VRAM,
2241 								AMDGPU_CSA_SIZE);
2242 				if (r) {
2243 					DRM_ERROR("allocate CSA failed %d\n", r);
2244 					goto init_failed;
2245 				}
2246 			}
2247 		}
2248 	}
2249 
2250 	if (amdgpu_sriov_vf(adev))
2251 		amdgpu_virt_init_data_exchange(adev);
2252 
2253 	r = amdgpu_ib_pool_init(adev);
2254 	if (r) {
2255 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2256 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2257 		goto init_failed;
2258 	}
2259 
2260 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2261 	if (r)
2262 		goto init_failed;
2263 
2264 	r = amdgpu_device_ip_hw_init_phase1(adev);
2265 	if (r)
2266 		goto init_failed;
2267 
2268 	r = amdgpu_device_fw_loading(adev);
2269 	if (r)
2270 		goto init_failed;
2271 
2272 	r = amdgpu_device_ip_hw_init_phase2(adev);
2273 	if (r)
2274 		goto init_failed;
2275 
2276 	/*
2277 	 * retired pages will be loaded from eeprom and reserved here,
2278 	 * it should be called after amdgpu_device_ip_hw_init_phase2  since
2279 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2280 	 * for I2C communication which only true at this point.
2281 	 *
2282 	 * amdgpu_ras_recovery_init may fail, but the upper only cares the
2283 	 * failure from bad gpu situation and stop amdgpu init process
2284 	 * accordingly. For other failed cases, it will still release all
2285 	 * the resource and print error message, rather than returning one
2286 	 * negative value to upper level.
2287 	 *
2288 	 * Note: theoretically, this should be called before all vram allocations
2289 	 * to protect retired page from abusing
2290 	 */
2291 	r = amdgpu_ras_recovery_init(adev);
2292 	if (r)
2293 		goto init_failed;
2294 
2295 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2296 		amdgpu_xgmi_add_device(adev);
2297 
2298 	/* Don't init kfd if whole hive need to be reset during init */
2299 	if (!adev->gmc.xgmi.pending_reset)
2300 		amdgpu_amdkfd_device_init(adev);
2301 
2302 	amdgpu_fru_get_product_info(adev);
2303 
2304 init_failed:
2305 	if (amdgpu_sriov_vf(adev))
2306 		amdgpu_virt_release_full_gpu(adev, true);
2307 
2308 	return r;
2309 }
2310 
2311 /**
2312  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2313  *
2314  * @adev: amdgpu_device pointer
2315  *
2316  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2317  * this function before a GPU reset.  If the value is retained after a
2318  * GPU reset, VRAM has not been lost.  Some GPU resets may destry VRAM contents.
2319  */
2320 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2321 {
2322 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2323 }
2324 
2325 /**
2326  * amdgpu_device_check_vram_lost - check if vram is valid
2327  *
2328  * @adev: amdgpu_device pointer
2329  *
2330  * Checks the reset magic value written to the gart pointer in VRAM.
2331  * The driver calls this after a GPU reset to see if the contents of
2332  * VRAM is lost or now.
2333  * returns true if vram is lost, false if not.
2334  */
2335 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2336 {
2337 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2338 			AMDGPU_RESET_MAGIC_NUM))
2339 		return true;
2340 
2341 	if (!amdgpu_in_reset(adev))
2342 		return false;
2343 
2344 	/*
2345 	 * For all ASICs with baco/mode1 reset, the VRAM is
2346 	 * always assumed to be lost.
2347 	 */
2348 	switch (amdgpu_asic_reset_method(adev)) {
2349 	case AMD_RESET_METHOD_BACO:
2350 	case AMD_RESET_METHOD_MODE1:
2351 		return true;
2352 	default:
2353 		return false;
2354 	}
2355 }
2356 
2357 /**
2358  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2359  *
2360  * @adev: amdgpu_device pointer
2361  * @state: clockgating state (gate or ungate)
2362  *
2363  * The list of all the hardware IPs that make up the asic is walked and the
2364  * set_clockgating_state callbacks are run.
2365  * Late initialization pass enabling clockgating for hardware IPs.
2366  * Fini or suspend, pass disabling clockgating for hardware IPs.
2367  * Returns 0 on success, negative error code on failure.
2368  */
2369 
2370 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2371 			       enum amd_clockgating_state state)
2372 {
2373 	int i, j, r;
2374 
2375 	if (amdgpu_emu_mode == 1)
2376 		return 0;
2377 
2378 	for (j = 0; j < adev->num_ip_blocks; j++) {
2379 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2380 		if (!adev->ip_blocks[i].status.late_initialized)
2381 			continue;
2382 		/* skip CG for GFX on S0ix */
2383 		if (adev->in_s0ix &&
2384 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2385 			continue;
2386 		/* skip CG for VCE/UVD, it's handled specially */
2387 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2388 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2389 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2390 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2391 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2392 			/* enable clockgating to save power */
2393 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2394 										     state);
2395 			if (r) {
2396 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2397 					  adev->ip_blocks[i].version->funcs->name, r);
2398 				return r;
2399 			}
2400 		}
2401 	}
2402 
2403 	return 0;
2404 }
2405 
2406 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2407 			       enum amd_powergating_state state)
2408 {
2409 	int i, j, r;
2410 
2411 	if (amdgpu_emu_mode == 1)
2412 		return 0;
2413 
2414 	for (j = 0; j < adev->num_ip_blocks; j++) {
2415 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2416 		if (!adev->ip_blocks[i].status.late_initialized)
2417 			continue;
2418 		/* skip PG for GFX on S0ix */
2419 		if (adev->in_s0ix &&
2420 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2421 			continue;
2422 		/* skip CG for VCE/UVD, it's handled specially */
2423 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2424 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2425 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2426 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2427 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2428 			/* enable powergating to save power */
2429 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2430 											state);
2431 			if (r) {
2432 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2433 					  adev->ip_blocks[i].version->funcs->name, r);
2434 				return r;
2435 			}
2436 		}
2437 	}
2438 	return 0;
2439 }
2440 
2441 static int amdgpu_device_enable_mgpu_fan_boost(void)
2442 {
2443 	struct amdgpu_gpu_instance *gpu_ins;
2444 	struct amdgpu_device *adev;
2445 	int i, ret = 0;
2446 
2447 	mutex_lock(&mgpu_info.mutex);
2448 
2449 	/*
2450 	 * MGPU fan boost feature should be enabled
2451 	 * only when there are two or more dGPUs in
2452 	 * the system
2453 	 */
2454 	if (mgpu_info.num_dgpu < 2)
2455 		goto out;
2456 
2457 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2458 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2459 		adev = gpu_ins->adev;
2460 		if (!(adev->flags & AMD_IS_APU) &&
2461 		    !gpu_ins->mgpu_fan_enabled) {
2462 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2463 			if (ret)
2464 				break;
2465 
2466 			gpu_ins->mgpu_fan_enabled = 1;
2467 		}
2468 	}
2469 
2470 out:
2471 	mutex_unlock(&mgpu_info.mutex);
2472 
2473 	return ret;
2474 }
2475 
2476 /**
2477  * amdgpu_device_ip_late_init - run late init for hardware IPs
2478  *
2479  * @adev: amdgpu_device pointer
2480  *
2481  * Late initialization pass for hardware IPs.  The list of all the hardware
2482  * IPs that make up the asic is walked and the late_init callbacks are run.
2483  * late_init covers any special initialization that an IP requires
2484  * after all of the have been initialized or something that needs to happen
2485  * late in the init process.
2486  * Returns 0 on success, negative error code on failure.
2487  */
2488 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2489 {
2490 	struct amdgpu_gpu_instance *gpu_instance;
2491 	int i = 0, r;
2492 
2493 	for (i = 0; i < adev->num_ip_blocks; i++) {
2494 		if (!adev->ip_blocks[i].status.hw)
2495 			continue;
2496 		if (adev->ip_blocks[i].version->funcs->late_init) {
2497 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2498 			if (r) {
2499 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2500 					  adev->ip_blocks[i].version->funcs->name, r);
2501 				return r;
2502 			}
2503 		}
2504 		adev->ip_blocks[i].status.late_initialized = true;
2505 	}
2506 
2507 	amdgpu_ras_set_error_query_ready(adev, true);
2508 
2509 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2510 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2511 
2512 	amdgpu_device_fill_reset_magic(adev);
2513 
2514 	r = amdgpu_device_enable_mgpu_fan_boost();
2515 	if (r)
2516 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2517 
2518 	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
2519 	if (adev->asic_type == CHIP_ARCTURUS &&
2520 	    amdgpu_passthrough(adev) &&
2521 	    adev->gmc.xgmi.num_physical_nodes > 1)
2522 		smu_set_light_sbr(&adev->smu, true);
2523 
2524 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2525 		mutex_lock(&mgpu_info.mutex);
2526 
2527 		/*
2528 		 * Reset device p-state to low as this was booted with high.
2529 		 *
2530 		 * This should be performed only after all devices from the same
2531 		 * hive get initialized.
2532 		 *
2533 		 * However, it's unknown how many device in the hive in advance.
2534 		 * As this is counted one by one during devices initializations.
2535 		 *
2536 		 * So, we wait for all XGMI interlinked devices initialized.
2537 		 * This may bring some delays as those devices may come from
2538 		 * different hives. But that should be OK.
2539 		 */
2540 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2541 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2542 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2543 				if (gpu_instance->adev->flags & AMD_IS_APU)
2544 					continue;
2545 
2546 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2547 						AMDGPU_XGMI_PSTATE_MIN);
2548 				if (r) {
2549 					DRM_ERROR("pstate setting failed (%d).\n", r);
2550 					break;
2551 				}
2552 			}
2553 		}
2554 
2555 		mutex_unlock(&mgpu_info.mutex);
2556 	}
2557 
2558 	return 0;
2559 }
2560 
2561 /**
2562  * amdgpu_device_ip_fini - run fini for hardware IPs
2563  *
2564  * @adev: amdgpu_device pointer
2565  *
2566  * Main teardown pass for hardware IPs.  The list of all the hardware
2567  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2568  * are run.  hw_fini tears down the hardware associated with each IP
2569  * and sw_fini tears down any software state associated with each IP.
2570  * Returns 0 on success, negative error code on failure.
2571  */
2572 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2573 {
2574 	int i, r;
2575 
2576 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2577 		amdgpu_virt_release_ras_err_handler_data(adev);
2578 
2579 	amdgpu_ras_pre_fini(adev);
2580 
2581 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2582 		amdgpu_xgmi_remove_device(adev);
2583 
2584 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2585 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2586 
2587 	amdgpu_amdkfd_device_fini(adev);
2588 
2589 	/* need to disable SMC first */
2590 	for (i = 0; i < adev->num_ip_blocks; i++) {
2591 		if (!adev->ip_blocks[i].status.hw)
2592 			continue;
2593 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2594 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2595 			/* XXX handle errors */
2596 			if (r) {
2597 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2598 					  adev->ip_blocks[i].version->funcs->name, r);
2599 			}
2600 			adev->ip_blocks[i].status.hw = false;
2601 			break;
2602 		}
2603 	}
2604 
2605 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2606 		if (!adev->ip_blocks[i].status.hw)
2607 			continue;
2608 
2609 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2610 		/* XXX handle errors */
2611 		if (r) {
2612 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2613 				  adev->ip_blocks[i].version->funcs->name, r);
2614 		}
2615 
2616 		adev->ip_blocks[i].status.hw = false;
2617 	}
2618 
2619 
2620 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2621 		if (!adev->ip_blocks[i].status.sw)
2622 			continue;
2623 
2624 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2625 			amdgpu_ucode_free_bo(adev);
2626 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2627 			amdgpu_device_wb_fini(adev);
2628 			amdgpu_device_vram_scratch_fini(adev);
2629 			amdgpu_ib_pool_fini(adev);
2630 		}
2631 
2632 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2633 		/* XXX handle errors */
2634 		if (r) {
2635 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2636 				  adev->ip_blocks[i].version->funcs->name, r);
2637 		}
2638 		adev->ip_blocks[i].status.sw = false;
2639 		adev->ip_blocks[i].status.valid = false;
2640 	}
2641 
2642 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2643 		if (!adev->ip_blocks[i].status.late_initialized)
2644 			continue;
2645 		if (adev->ip_blocks[i].version->funcs->late_fini)
2646 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2647 		adev->ip_blocks[i].status.late_initialized = false;
2648 	}
2649 
2650 	amdgpu_ras_fini(adev);
2651 
2652 	if (amdgpu_sriov_vf(adev))
2653 		if (amdgpu_virt_release_full_gpu(adev, false))
2654 			DRM_ERROR("failed to release exclusive mode on fini\n");
2655 
2656 	return 0;
2657 }
2658 
2659 /**
2660  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2661  *
2662  * @work: work_struct.
2663  */
2664 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2665 {
2666 	struct amdgpu_device *adev =
2667 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2668 	int r;
2669 
2670 	r = amdgpu_ib_ring_tests(adev);
2671 	if (r)
2672 		DRM_ERROR("ib ring test failed (%d).\n", r);
2673 }
2674 
2675 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2676 {
2677 	struct amdgpu_device *adev =
2678 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2679 
2680 	mutex_lock(&adev->gfx.gfx_off_mutex);
2681 	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
2682 		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2683 			adev->gfx.gfx_off_state = true;
2684 	}
2685 	mutex_unlock(&adev->gfx.gfx_off_mutex);
2686 }
2687 
2688 /**
2689  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2690  *
2691  * @adev: amdgpu_device pointer
2692  *
2693  * Main suspend function for hardware IPs.  The list of all the hardware
2694  * IPs that make up the asic is walked, clockgating is disabled and the
2695  * suspend callbacks are run.  suspend puts the hardware and software state
2696  * in each IP into a state suitable for suspend.
2697  * Returns 0 on success, negative error code on failure.
2698  */
2699 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2700 {
2701 	int i, r;
2702 
2703 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2704 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2705 
2706 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2707 		if (!adev->ip_blocks[i].status.valid)
2708 			continue;
2709 
2710 		/* displays are handled separately */
2711 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2712 			continue;
2713 
2714 		/* XXX handle errors */
2715 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2716 		/* XXX handle errors */
2717 		if (r) {
2718 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2719 				  adev->ip_blocks[i].version->funcs->name, r);
2720 			return r;
2721 		}
2722 
2723 		adev->ip_blocks[i].status.hw = false;
2724 	}
2725 
2726 	return 0;
2727 }
2728 
2729 /**
2730  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2731  *
2732  * @adev: amdgpu_device pointer
2733  *
2734  * Main suspend function for hardware IPs.  The list of all the hardware
2735  * IPs that make up the asic is walked, clockgating is disabled and the
2736  * suspend callbacks are run.  suspend puts the hardware and software state
2737  * in each IP into a state suitable for suspend.
2738  * Returns 0 on success, negative error code on failure.
2739  */
2740 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2741 {
2742 	int i, r;
2743 
2744 	if (adev->in_s0ix)
2745 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2746 
2747 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2748 		if (!adev->ip_blocks[i].status.valid)
2749 			continue;
2750 		/* displays are handled in phase1 */
2751 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2752 			continue;
2753 		/* PSP lost connection when err_event_athub occurs */
2754 		if (amdgpu_ras_intr_triggered() &&
2755 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2756 			adev->ip_blocks[i].status.hw = false;
2757 			continue;
2758 		}
2759 
2760 		/* skip unnecessary suspend if we do not initialize them yet */
2761 		if (adev->gmc.xgmi.pending_reset &&
2762 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2763 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2764 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2765 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2766 			adev->ip_blocks[i].status.hw = false;
2767 			continue;
2768 		}
2769 
2770 		/* skip suspend of gfx and psp for S0ix
2771 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2772 		 * like at runtime. PSP is also part of the always on hardware
2773 		 * so no need to suspend it.
2774 		 */
2775 		if (adev->in_s0ix &&
2776 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2777 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2778 			continue;
2779 
2780 		/* XXX handle errors */
2781 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2782 		/* XXX handle errors */
2783 		if (r) {
2784 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2785 				  adev->ip_blocks[i].version->funcs->name, r);
2786 		}
2787 		adev->ip_blocks[i].status.hw = false;
2788 		/* handle putting the SMC in the appropriate state */
2789 		if(!amdgpu_sriov_vf(adev)){
2790 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2791 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2792 				if (r) {
2793 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2794 							adev->mp1_state, r);
2795 					return r;
2796 				}
2797 			}
2798 		}
2799 	}
2800 
2801 	return 0;
2802 }
2803 
2804 /**
2805  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2806  *
2807  * @adev: amdgpu_device pointer
2808  *
2809  * Main suspend function for hardware IPs.  The list of all the hardware
2810  * IPs that make up the asic is walked, clockgating is disabled and the
2811  * suspend callbacks are run.  suspend puts the hardware and software state
2812  * in each IP into a state suitable for suspend.
2813  * Returns 0 on success, negative error code on failure.
2814  */
2815 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2816 {
2817 	int r;
2818 
2819 	if (amdgpu_sriov_vf(adev)) {
2820 		amdgpu_virt_fini_data_exchange(adev);
2821 		amdgpu_virt_request_full_gpu(adev, false);
2822 	}
2823 
2824 	r = amdgpu_device_ip_suspend_phase1(adev);
2825 	if (r)
2826 		return r;
2827 	r = amdgpu_device_ip_suspend_phase2(adev);
2828 
2829 	if (amdgpu_sriov_vf(adev))
2830 		amdgpu_virt_release_full_gpu(adev, false);
2831 
2832 	return r;
2833 }
2834 
2835 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2836 {
2837 	int i, r;
2838 
2839 	static enum amd_ip_block_type ip_order[] = {
2840 		AMD_IP_BLOCK_TYPE_GMC,
2841 		AMD_IP_BLOCK_TYPE_COMMON,
2842 		AMD_IP_BLOCK_TYPE_PSP,
2843 		AMD_IP_BLOCK_TYPE_IH,
2844 	};
2845 
2846 	for (i = 0; i < adev->num_ip_blocks; i++) {
2847 		int j;
2848 		struct amdgpu_ip_block *block;
2849 
2850 		block = &adev->ip_blocks[i];
2851 		block->status.hw = false;
2852 
2853 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2854 
2855 			if (block->version->type != ip_order[j] ||
2856 				!block->status.valid)
2857 				continue;
2858 
2859 			r = block->version->funcs->hw_init(adev);
2860 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2861 			if (r)
2862 				return r;
2863 			block->status.hw = true;
2864 		}
2865 	}
2866 
2867 	return 0;
2868 }
2869 
2870 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2871 {
2872 	int i, r;
2873 
2874 	static enum amd_ip_block_type ip_order[] = {
2875 		AMD_IP_BLOCK_TYPE_SMC,
2876 		AMD_IP_BLOCK_TYPE_DCE,
2877 		AMD_IP_BLOCK_TYPE_GFX,
2878 		AMD_IP_BLOCK_TYPE_SDMA,
2879 		AMD_IP_BLOCK_TYPE_UVD,
2880 		AMD_IP_BLOCK_TYPE_VCE,
2881 		AMD_IP_BLOCK_TYPE_VCN
2882 	};
2883 
2884 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
2885 		int j;
2886 		struct amdgpu_ip_block *block;
2887 
2888 		for (j = 0; j < adev->num_ip_blocks; j++) {
2889 			block = &adev->ip_blocks[j];
2890 
2891 			if (block->version->type != ip_order[i] ||
2892 				!block->status.valid ||
2893 				block->status.hw)
2894 				continue;
2895 
2896 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
2897 				r = block->version->funcs->resume(adev);
2898 			else
2899 				r = block->version->funcs->hw_init(adev);
2900 
2901 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2902 			if (r)
2903 				return r;
2904 			block->status.hw = true;
2905 		}
2906 	}
2907 
2908 	return 0;
2909 }
2910 
2911 /**
2912  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2913  *
2914  * @adev: amdgpu_device pointer
2915  *
2916  * First resume function for hardware IPs.  The list of all the hardware
2917  * IPs that make up the asic is walked and the resume callbacks are run for
2918  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
2919  * after a suspend and updates the software state as necessary.  This
2920  * function is also used for restoring the GPU after a GPU reset.
2921  * Returns 0 on success, negative error code on failure.
2922  */
2923 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
2924 {
2925 	int i, r;
2926 
2927 	for (i = 0; i < adev->num_ip_blocks; i++) {
2928 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2929 			continue;
2930 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2931 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2932 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2933 
2934 			r = adev->ip_blocks[i].version->funcs->resume(adev);
2935 			if (r) {
2936 				DRM_ERROR("resume of IP block <%s> failed %d\n",
2937 					  adev->ip_blocks[i].version->funcs->name, r);
2938 				return r;
2939 			}
2940 			adev->ip_blocks[i].status.hw = true;
2941 		}
2942 	}
2943 
2944 	return 0;
2945 }
2946 
2947 /**
2948  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2949  *
2950  * @adev: amdgpu_device pointer
2951  *
2952  * First resume function for hardware IPs.  The list of all the hardware
2953  * IPs that make up the asic is walked and the resume callbacks are run for
2954  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
2955  * functional state after a suspend and updates the software state as
2956  * necessary.  This function is also used for restoring the GPU after a GPU
2957  * reset.
2958  * Returns 0 on success, negative error code on failure.
2959  */
2960 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2961 {
2962 	int i, r;
2963 
2964 	for (i = 0; i < adev->num_ip_blocks; i++) {
2965 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
2966 			continue;
2967 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2968 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2969 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2970 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2971 			continue;
2972 		r = adev->ip_blocks[i].version->funcs->resume(adev);
2973 		if (r) {
2974 			DRM_ERROR("resume of IP block <%s> failed %d\n",
2975 				  adev->ip_blocks[i].version->funcs->name, r);
2976 			return r;
2977 		}
2978 		adev->ip_blocks[i].status.hw = true;
2979 	}
2980 
2981 	return 0;
2982 }
2983 
2984 /**
2985  * amdgpu_device_ip_resume - run resume for hardware IPs
2986  *
2987  * @adev: amdgpu_device pointer
2988  *
2989  * Main resume function for hardware IPs.  The hardware IPs
2990  * are split into two resume functions because they are
2991  * are also used in in recovering from a GPU reset and some additional
2992  * steps need to be take between them.  In this case (S3/S4) they are
2993  * run sequentially.
2994  * Returns 0 on success, negative error code on failure.
2995  */
2996 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2997 {
2998 	int r;
2999 
3000 	r = amdgpu_device_ip_resume_phase1(adev);
3001 	if (r)
3002 		return r;
3003 
3004 	r = amdgpu_device_fw_loading(adev);
3005 	if (r)
3006 		return r;
3007 
3008 	r = amdgpu_device_ip_resume_phase2(adev);
3009 
3010 	return r;
3011 }
3012 
3013 /**
3014  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3015  *
3016  * @adev: amdgpu_device pointer
3017  *
3018  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3019  */
3020 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3021 {
3022 	if (amdgpu_sriov_vf(adev)) {
3023 		if (adev->is_atom_fw) {
3024 			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
3025 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3026 		} else {
3027 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3028 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3029 		}
3030 
3031 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3032 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3033 	}
3034 }
3035 
3036 /**
3037  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3038  *
3039  * @asic_type: AMD asic type
3040  *
3041  * Check if there is DC (new modesetting infrastructre) support for an asic.
3042  * returns true if DC has support, false if not.
3043  */
3044 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3045 {
3046 	switch (asic_type) {
3047 #if defined(CONFIG_DRM_AMD_DC)
3048 #if defined(CONFIG_DRM_AMD_DC_SI)
3049 	case CHIP_TAHITI:
3050 	case CHIP_PITCAIRN:
3051 	case CHIP_VERDE:
3052 	case CHIP_OLAND:
3053 #endif
3054 	case CHIP_BONAIRE:
3055 	case CHIP_KAVERI:
3056 	case CHIP_KABINI:
3057 	case CHIP_MULLINS:
3058 		/*
3059 		 * We have systems in the wild with these ASICs that require
3060 		 * LVDS and VGA support which is not supported with DC.
3061 		 *
3062 		 * Fallback to the non-DC driver here by default so as not to
3063 		 * cause regressions.
3064 		 */
3065 		return amdgpu_dc > 0;
3066 	case CHIP_HAWAII:
3067 	case CHIP_CARRIZO:
3068 	case CHIP_STONEY:
3069 	case CHIP_POLARIS10:
3070 	case CHIP_POLARIS11:
3071 	case CHIP_POLARIS12:
3072 	case CHIP_VEGAM:
3073 	case CHIP_TONGA:
3074 	case CHIP_FIJI:
3075 	case CHIP_VEGA10:
3076 	case CHIP_VEGA12:
3077 	case CHIP_VEGA20:
3078 #if defined(CONFIG_DRM_AMD_DC_DCN)
3079 	case CHIP_RAVEN:
3080 	case CHIP_NAVI10:
3081 	case CHIP_NAVI14:
3082 	case CHIP_NAVI12:
3083 	case CHIP_RENOIR:
3084 	case CHIP_SIENNA_CICHLID:
3085 	case CHIP_NAVY_FLOUNDER:
3086 	case CHIP_DIMGREY_CAVEFISH:
3087 	case CHIP_VANGOGH:
3088 #endif
3089 		return amdgpu_dc != 0;
3090 #endif
3091 	default:
3092 		if (amdgpu_dc > 0)
3093 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3094 					 "but isn't supported by ASIC, ignoring\n");
3095 		return false;
3096 	}
3097 }
3098 
3099 /**
3100  * amdgpu_device_has_dc_support - check if dc is supported
3101  *
3102  * @adev: amdgpu_device pointer
3103  *
3104  * Returns true for supported, false for not supported
3105  */
3106 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3107 {
3108 	if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
3109 		return false;
3110 
3111 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3112 }
3113 
3114 
3115 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3116 {
3117 	struct amdgpu_device *adev =
3118 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3119 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3120 
3121 	/* It's a bug to not have a hive within this function */
3122 	if (WARN_ON(!hive))
3123 		return;
3124 
3125 	/*
3126 	 * Use task barrier to synchronize all xgmi reset works across the
3127 	 * hive. task_barrier_enter and task_barrier_exit will block
3128 	 * until all the threads running the xgmi reset works reach
3129 	 * those points. task_barrier_full will do both blocks.
3130 	 */
3131 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3132 
3133 		task_barrier_enter(&hive->tb);
3134 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3135 
3136 		if (adev->asic_reset_res)
3137 			goto fail;
3138 
3139 		task_barrier_exit(&hive->tb);
3140 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3141 
3142 		if (adev->asic_reset_res)
3143 			goto fail;
3144 
3145 		if (adev->mmhub.ras_funcs &&
3146 		    adev->mmhub.ras_funcs->reset_ras_error_count)
3147 			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3148 	} else {
3149 
3150 		task_barrier_full(&hive->tb);
3151 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3152 	}
3153 
3154 fail:
3155 	if (adev->asic_reset_res)
3156 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3157 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3158 	amdgpu_put_xgmi_hive(hive);
3159 }
3160 
3161 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3162 {
3163 	char *input = amdgpu_lockup_timeout;
3164 	char *timeout_setting = NULL;
3165 	int index = 0;
3166 	long timeout;
3167 	int ret = 0;
3168 
3169 	/*
3170 	 * By default timeout for non compute jobs is 10000
3171 	 * and 60000 for compute jobs.
3172 	 * In SR-IOV or passthrough mode, timeout for compute
3173 	 * jobs are 60000 by default.
3174 	 */
3175 	adev->gfx_timeout = msecs_to_jiffies(10000);
3176 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3177 	if (amdgpu_sriov_vf(adev))
3178 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3179 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3180 	else
3181 		adev->compute_timeout =  msecs_to_jiffies(60000);
3182 
3183 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3184 		while ((timeout_setting = strsep(&input, ",")) &&
3185 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3186 			ret = kstrtol(timeout_setting, 0, &timeout);
3187 			if (ret)
3188 				return ret;
3189 
3190 			if (timeout == 0) {
3191 				index++;
3192 				continue;
3193 			} else if (timeout < 0) {
3194 				timeout = MAX_SCHEDULE_TIMEOUT;
3195 			} else {
3196 				timeout = msecs_to_jiffies(timeout);
3197 			}
3198 
3199 			switch (index++) {
3200 			case 0:
3201 				adev->gfx_timeout = timeout;
3202 				break;
3203 			case 1:
3204 				adev->compute_timeout = timeout;
3205 				break;
3206 			case 2:
3207 				adev->sdma_timeout = timeout;
3208 				break;
3209 			case 3:
3210 				adev->video_timeout = timeout;
3211 				break;
3212 			default:
3213 				break;
3214 			}
3215 		}
3216 		/*
3217 		 * There is only one value specified and
3218 		 * it should apply to all non-compute jobs.
3219 		 */
3220 		if (index == 1) {
3221 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3222 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3223 				adev->compute_timeout = adev->gfx_timeout;
3224 		}
3225 	}
3226 
3227 	return ret;
3228 }
3229 
3230 static const struct attribute *amdgpu_dev_attributes[] = {
3231 	&dev_attr_product_name.attr,
3232 	&dev_attr_product_number.attr,
3233 	&dev_attr_serial_number.attr,
3234 	&dev_attr_pcie_replay_count.attr,
3235 	NULL
3236 };
3237 
3238 
3239 /**
3240  * amdgpu_device_init - initialize the driver
3241  *
3242  * @adev: amdgpu_device pointer
3243  * @flags: driver flags
3244  *
3245  * Initializes the driver info and hw (all asics).
3246  * Returns 0 for success or an error on failure.
3247  * Called at driver startup.
3248  */
3249 int amdgpu_device_init(struct amdgpu_device *adev,
3250 		       uint32_t flags)
3251 {
3252 	struct drm_device *ddev = adev_to_drm(adev);
3253 	struct pci_dev *pdev = adev->pdev;
3254 	int r, i;
3255 	bool px = false;
3256 	u32 max_MBps;
3257 
3258 	adev->shutdown = false;
3259 	adev->flags = flags;
3260 
3261 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3262 		adev->asic_type = amdgpu_force_asic_type;
3263 	else
3264 		adev->asic_type = flags & AMD_ASIC_MASK;
3265 
3266 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3267 	if (amdgpu_emu_mode == 1)
3268 		adev->usec_timeout *= 10;
3269 	adev->gmc.gart_size = 512 * 1024 * 1024;
3270 	adev->accel_working = false;
3271 	adev->num_rings = 0;
3272 	adev->mman.buffer_funcs = NULL;
3273 	adev->mman.buffer_funcs_ring = NULL;
3274 	adev->vm_manager.vm_pte_funcs = NULL;
3275 	adev->vm_manager.vm_pte_num_scheds = 0;
3276 	adev->gmc.gmc_funcs = NULL;
3277 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3278 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3279 
3280 	adev->smc_rreg = &amdgpu_invalid_rreg;
3281 	adev->smc_wreg = &amdgpu_invalid_wreg;
3282 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3283 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3284 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3285 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3286 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3287 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3288 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3289 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3290 	adev->didt_rreg = &amdgpu_invalid_rreg;
3291 	adev->didt_wreg = &amdgpu_invalid_wreg;
3292 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3293 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3294 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3295 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3296 
3297 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3298 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3299 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3300 
3301 	/* mutex initialization are all done here so we
3302 	 * can recall function without having locking issues */
3303 	mutex_init(&adev->firmware.mutex);
3304 	mutex_init(&adev->pm.mutex);
3305 	mutex_init(&adev->gfx.gpu_clock_mutex);
3306 	mutex_init(&adev->srbm_mutex);
3307 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3308 	mutex_init(&adev->gfx.gfx_off_mutex);
3309 	mutex_init(&adev->grbm_idx_mutex);
3310 	mutex_init(&adev->mn_lock);
3311 	mutex_init(&adev->virt.vf_errors.lock);
3312 	hash_init(adev->mn_hash);
3313 	atomic_set(&adev->in_gpu_reset, 0);
3314 	init_rwsem(&adev->reset_sem);
3315 	mutex_init(&adev->psp.mutex);
3316 	mutex_init(&adev->notifier_lock);
3317 
3318 	r = amdgpu_device_check_arguments(adev);
3319 	if (r)
3320 		return r;
3321 
3322 	spin_lock_init(&adev->mmio_idx_lock);
3323 	spin_lock_init(&adev->smc_idx_lock);
3324 	spin_lock_init(&adev->pcie_idx_lock);
3325 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3326 	spin_lock_init(&adev->didt_idx_lock);
3327 	spin_lock_init(&adev->gc_cac_idx_lock);
3328 	spin_lock_init(&adev->se_cac_idx_lock);
3329 	spin_lock_init(&adev->audio_endpt_idx_lock);
3330 	spin_lock_init(&adev->mm_stats.lock);
3331 
3332 	INIT_LIST_HEAD(&adev->shadow_list);
3333 	mutex_init(&adev->shadow_list_lock);
3334 
3335 	INIT_LIST_HEAD(&adev->reset_list);
3336 
3337 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3338 			  amdgpu_device_delayed_init_work_handler);
3339 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3340 			  amdgpu_device_delay_enable_gfx_off);
3341 
3342 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3343 
3344 	adev->gfx.gfx_off_req_count = 1;
3345 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3346 
3347 	atomic_set(&adev->throttling_logging_enabled, 1);
3348 	/*
3349 	 * If throttling continues, logging will be performed every minute
3350 	 * to avoid log flooding. "-1" is subtracted since the thermal
3351 	 * throttling interrupt comes every second. Thus, the total logging
3352 	 * interval is 59 seconds(retelimited printk interval) + 1(waiting
3353 	 * for throttling interrupt) = 60 seconds.
3354 	 */
3355 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3356 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3357 
3358 	/* Registers mapping */
3359 	/* TODO: block userspace mapping of io register */
3360 	if (adev->asic_type >= CHIP_BONAIRE) {
3361 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3362 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3363 	} else {
3364 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3365 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3366 	}
3367 
3368 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3369 	if (adev->rmmio == NULL) {
3370 		return -ENOMEM;
3371 	}
3372 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3373 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3374 
3375 	/* enable PCIE atomic ops */
3376 	r = pci_enable_atomic_ops_to_root(adev->pdev,
3377 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3378 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3379 	if (r) {
3380 		adev->have_atomics_support = false;
3381 		DRM_INFO("PCIE atomic ops is not supported\n");
3382 	} else {
3383 		adev->have_atomics_support = true;
3384 	}
3385 
3386 	amdgpu_device_get_pcie_info(adev);
3387 
3388 	if (amdgpu_mcbp)
3389 		DRM_INFO("MCBP is enabled\n");
3390 
3391 	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3392 		adev->enable_mes = true;
3393 
3394 	/* detect hw virtualization here */
3395 	amdgpu_detect_virtualization(adev);
3396 
3397 	r = amdgpu_device_get_job_timeout_settings(adev);
3398 	if (r) {
3399 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3400 		goto failed_unmap;
3401 	}
3402 
3403 	/* early init functions */
3404 	r = amdgpu_device_ip_early_init(adev);
3405 	if (r)
3406 		goto failed_unmap;
3407 
3408 	/* doorbell bar mapping and doorbell index init*/
3409 	amdgpu_device_doorbell_init(adev);
3410 
3411 	if (amdgpu_emu_mode == 1) {
3412 		/* post the asic on emulation mode */
3413 		emu_soc_asic_init(adev);
3414 		goto fence_driver_init;
3415 	}
3416 
3417 	amdgpu_reset_init(adev);
3418 
3419 	/* detect if we are with an SRIOV vbios */
3420 	amdgpu_device_detect_sriov_bios(adev);
3421 
3422 	/* check if we need to reset the asic
3423 	 *  E.g., driver was not cleanly unloaded previously, etc.
3424 	 */
3425 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3426 		if (adev->gmc.xgmi.num_physical_nodes) {
3427 			dev_info(adev->dev, "Pending hive reset.\n");
3428 			adev->gmc.xgmi.pending_reset = true;
3429 			/* Only need to init necessary block for SMU to handle the reset */
3430 			for (i = 0; i < adev->num_ip_blocks; i++) {
3431 				if (!adev->ip_blocks[i].status.valid)
3432 					continue;
3433 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3434 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3435 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3436 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3437 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3438 						adev->ip_blocks[i].version->funcs->name);
3439 					adev->ip_blocks[i].status.hw = true;
3440 				}
3441 			}
3442 		} else {
3443 			r = amdgpu_asic_reset(adev);
3444 			if (r) {
3445 				dev_err(adev->dev, "asic reset on init failed\n");
3446 				goto failed;
3447 			}
3448 		}
3449 	}
3450 
3451 	pci_enable_pcie_error_reporting(adev->pdev);
3452 
3453 	/* Post card if necessary */
3454 	if (amdgpu_device_need_post(adev)) {
3455 		if (!adev->bios) {
3456 			dev_err(adev->dev, "no vBIOS found\n");
3457 			r = -EINVAL;
3458 			goto failed;
3459 		}
3460 		DRM_INFO("GPU posting now...\n");
3461 		r = amdgpu_device_asic_init(adev);
3462 		if (r) {
3463 			dev_err(adev->dev, "gpu post error!\n");
3464 			goto failed;
3465 		}
3466 	}
3467 
3468 	if (adev->is_atom_fw) {
3469 		/* Initialize clocks */
3470 		r = amdgpu_atomfirmware_get_clock_info(adev);
3471 		if (r) {
3472 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3473 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3474 			goto failed;
3475 		}
3476 	} else {
3477 		/* Initialize clocks */
3478 		r = amdgpu_atombios_get_clock_info(adev);
3479 		if (r) {
3480 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3481 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3482 			goto failed;
3483 		}
3484 		/* init i2c buses */
3485 		if (!amdgpu_device_has_dc_support(adev))
3486 			amdgpu_atombios_i2c_init(adev);
3487 	}
3488 
3489 fence_driver_init:
3490 	/* Fence driver */
3491 	r = amdgpu_fence_driver_init(adev);
3492 	if (r) {
3493 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
3494 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3495 		goto failed;
3496 	}
3497 
3498 	/* init the mode config */
3499 	drm_mode_config_init(adev_to_drm(adev));
3500 
3501 	r = amdgpu_device_ip_init(adev);
3502 	if (r) {
3503 		/* failed in exclusive mode due to timeout */
3504 		if (amdgpu_sriov_vf(adev) &&
3505 		    !amdgpu_sriov_runtime(adev) &&
3506 		    amdgpu_virt_mmio_blocked(adev) &&
3507 		    !amdgpu_virt_wait_reset(adev)) {
3508 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3509 			/* Don't send request since VF is inactive. */
3510 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3511 			adev->virt.ops = NULL;
3512 			r = -EAGAIN;
3513 			goto release_ras_con;
3514 		}
3515 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3516 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3517 		goto release_ras_con;
3518 	}
3519 
3520 	dev_info(adev->dev,
3521 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3522 			adev->gfx.config.max_shader_engines,
3523 			adev->gfx.config.max_sh_per_se,
3524 			adev->gfx.config.max_cu_per_sh,
3525 			adev->gfx.cu_info.number);
3526 
3527 	adev->accel_working = true;
3528 
3529 	amdgpu_vm_check_compute_bug(adev);
3530 
3531 	/* Initialize the buffer migration limit. */
3532 	if (amdgpu_moverate >= 0)
3533 		max_MBps = amdgpu_moverate;
3534 	else
3535 		max_MBps = 8; /* Allow 8 MB/s. */
3536 	/* Get a log2 for easy divisions. */
3537 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3538 
3539 	amdgpu_fbdev_init(adev);
3540 
3541 	r = amdgpu_pm_sysfs_init(adev);
3542 	if (r) {
3543 		adev->pm_sysfs_en = false;
3544 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3545 	} else
3546 		adev->pm_sysfs_en = true;
3547 
3548 	r = amdgpu_ucode_sysfs_init(adev);
3549 	if (r) {
3550 		adev->ucode_sysfs_en = false;
3551 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3552 	} else
3553 		adev->ucode_sysfs_en = true;
3554 
3555 	if ((amdgpu_testing & 1)) {
3556 		if (adev->accel_working)
3557 			amdgpu_test_moves(adev);
3558 		else
3559 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3560 	}
3561 	if (amdgpu_benchmarking) {
3562 		if (adev->accel_working)
3563 			amdgpu_benchmark(adev, amdgpu_benchmarking);
3564 		else
3565 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3566 	}
3567 
3568 	/*
3569 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3570 	 * Otherwise the mgpu fan boost feature will be skipped due to the
3571 	 * gpu instance is counted less.
3572 	 */
3573 	amdgpu_register_gpu_instance(adev);
3574 
3575 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3576 	 * explicit gating rather than handling it automatically.
3577 	 */
3578 	if (!adev->gmc.xgmi.pending_reset) {
3579 		r = amdgpu_device_ip_late_init(adev);
3580 		if (r) {
3581 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3582 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3583 			goto release_ras_con;
3584 		}
3585 		/* must succeed. */
3586 		amdgpu_ras_resume(adev);
3587 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3588 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3589 	}
3590 
3591 	if (amdgpu_sriov_vf(adev))
3592 		flush_delayed_work(&adev->delayed_init_work);
3593 
3594 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3595 	if (r)
3596 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3597 
3598 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3599 		r = amdgpu_pmu_init(adev);
3600 	if (r)
3601 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3602 
3603 	/* Have stored pci confspace at hand for restore in sudden PCI error */
3604 	if (amdgpu_device_cache_pci_state(adev->pdev))
3605 		pci_restore_state(pdev);
3606 
3607 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3608 	/* this will fail for cards that aren't VGA class devices, just
3609 	 * ignore it */
3610 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3611 		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
3612 
3613 	if (amdgpu_device_supports_px(ddev)) {
3614 		px = true;
3615 		vga_switcheroo_register_client(adev->pdev,
3616 					       &amdgpu_switcheroo_ops, px);
3617 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3618 	}
3619 
3620 	if (adev->gmc.xgmi.pending_reset)
3621 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3622 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3623 
3624 	return 0;
3625 
3626 release_ras_con:
3627 	amdgpu_release_ras_context(adev);
3628 
3629 failed:
3630 	amdgpu_vf_error_trans_all(adev);
3631 
3632 failed_unmap:
3633 	iounmap(adev->rmmio);
3634 	adev->rmmio = NULL;
3635 
3636 	return r;
3637 }
3638 
3639 /**
3640  * amdgpu_device_fini - tear down the driver
3641  *
3642  * @adev: amdgpu_device pointer
3643  *
3644  * Tear down the driver info (all asics).
3645  * Called at driver shutdown.
3646  */
3647 void amdgpu_device_fini(struct amdgpu_device *adev)
3648 {
3649 	dev_info(adev->dev, "amdgpu: finishing device.\n");
3650 	flush_delayed_work(&adev->delayed_init_work);
3651 	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3652 	adev->shutdown = true;
3653 
3654 	kfree(adev->pci_state);
3655 
3656 	/* make sure IB test finished before entering exclusive mode
3657 	 * to avoid preemption on IB test
3658 	 * */
3659 	if (amdgpu_sriov_vf(adev)) {
3660 		amdgpu_virt_request_full_gpu(adev, false);
3661 		amdgpu_virt_fini_data_exchange(adev);
3662 	}
3663 
3664 	/* disable all interrupts */
3665 	amdgpu_irq_disable_all(adev);
3666 	if (adev->mode_info.mode_config_initialized){
3667 		if (!amdgpu_device_has_dc_support(adev))
3668 			drm_helper_force_disable_all(adev_to_drm(adev));
3669 		else
3670 			drm_atomic_helper_shutdown(adev_to_drm(adev));
3671 	}
3672 	amdgpu_fence_driver_fini(adev);
3673 	if (adev->pm_sysfs_en)
3674 		amdgpu_pm_sysfs_fini(adev);
3675 	amdgpu_fbdev_fini(adev);
3676 	amdgpu_device_ip_fini(adev);
3677 	release_firmware(adev->firmware.gpu_info_fw);
3678 	adev->firmware.gpu_info_fw = NULL;
3679 	adev->accel_working = false;
3680 
3681 	amdgpu_reset_fini(adev);
3682 
3683 	/* free i2c buses */
3684 	if (!amdgpu_device_has_dc_support(adev))
3685 		amdgpu_i2c_fini(adev);
3686 
3687 	if (amdgpu_emu_mode != 1)
3688 		amdgpu_atombios_fini(adev);
3689 
3690 	kfree(adev->bios);
3691 	adev->bios = NULL;
3692 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3693 		vga_switcheroo_unregister_client(adev->pdev);
3694 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3695 	}
3696 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3697 		vga_client_register(adev->pdev, NULL, NULL, NULL);
3698 	iounmap(adev->rmmio);
3699 	adev->rmmio = NULL;
3700 	amdgpu_device_doorbell_fini(adev);
3701 
3702 	if (adev->ucode_sysfs_en)
3703 		amdgpu_ucode_sysfs_fini(adev);
3704 
3705 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3706 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3707 		amdgpu_pmu_fini(adev);
3708 	if (adev->mman.discovery_bin)
3709 		amdgpu_discovery_fini(adev);
3710 }
3711 
3712 
3713 /*
3714  * Suspend & resume.
3715  */
3716 /**
3717  * amdgpu_device_suspend - initiate device suspend
3718  *
3719  * @dev: drm dev pointer
3720  * @fbcon : notify the fbdev of suspend
3721  *
3722  * Puts the hw in the suspend state (all asics).
3723  * Returns 0 for success or an error on failure.
3724  * Called at driver suspend.
3725  */
3726 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3727 {
3728 	struct amdgpu_device *adev = drm_to_adev(dev);
3729 
3730 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3731 		return 0;
3732 
3733 	adev->in_suspend = true;
3734 	drm_kms_helper_poll_disable(dev);
3735 
3736 	if (fbcon)
3737 		amdgpu_fbdev_set_suspend(adev, 1);
3738 
3739 	cancel_delayed_work_sync(&adev->delayed_init_work);
3740 
3741 	amdgpu_ras_suspend(adev);
3742 
3743 	amdgpu_device_ip_suspend_phase1(adev);
3744 
3745 	if (!adev->in_s0ix)
3746 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3747 
3748 	/* evict vram memory */
3749 	amdgpu_bo_evict_vram(adev);
3750 
3751 	amdgpu_fence_driver_suspend(adev);
3752 
3753 	amdgpu_device_ip_suspend_phase2(adev);
3754 	/* evict remaining vram memory
3755 	 * This second call to evict vram is to evict the gart page table
3756 	 * using the CPU.
3757 	 */
3758 	amdgpu_bo_evict_vram(adev);
3759 
3760 	return 0;
3761 }
3762 
3763 /**
3764  * amdgpu_device_resume - initiate device resume
3765  *
3766  * @dev: drm dev pointer
3767  * @fbcon : notify the fbdev of resume
3768  *
3769  * Bring the hw back to operating state (all asics).
3770  * Returns 0 for success or an error on failure.
3771  * Called at driver resume.
3772  */
3773 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3774 {
3775 	struct amdgpu_device *adev = drm_to_adev(dev);
3776 	int r = 0;
3777 
3778 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3779 		return 0;
3780 
3781 	if (adev->in_s0ix)
3782 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3783 
3784 	/* post card */
3785 	if (amdgpu_device_need_post(adev)) {
3786 		r = amdgpu_device_asic_init(adev);
3787 		if (r)
3788 			dev_err(adev->dev, "amdgpu asic init failed\n");
3789 	}
3790 
3791 	r = amdgpu_device_ip_resume(adev);
3792 	if (r) {
3793 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3794 		return r;
3795 	}
3796 	amdgpu_fence_driver_resume(adev);
3797 
3798 
3799 	r = amdgpu_device_ip_late_init(adev);
3800 	if (r)
3801 		return r;
3802 
3803 	queue_delayed_work(system_wq, &adev->delayed_init_work,
3804 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
3805 
3806 	if (!adev->in_s0ix) {
3807 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
3808 		if (r)
3809 			return r;
3810 	}
3811 
3812 	/* Make sure IB tests flushed */
3813 	flush_delayed_work(&adev->delayed_init_work);
3814 
3815 	if (fbcon)
3816 		amdgpu_fbdev_set_suspend(adev, 0);
3817 
3818 	drm_kms_helper_poll_enable(dev);
3819 
3820 	amdgpu_ras_resume(adev);
3821 
3822 	/*
3823 	 * Most of the connector probing functions try to acquire runtime pm
3824 	 * refs to ensure that the GPU is powered on when connector polling is
3825 	 * performed. Since we're calling this from a runtime PM callback,
3826 	 * trying to acquire rpm refs will cause us to deadlock.
3827 	 *
3828 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
3829 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
3830 	 */
3831 #ifdef CONFIG_PM
3832 	dev->dev->power.disable_depth++;
3833 #endif
3834 	if (!amdgpu_device_has_dc_support(adev))
3835 		drm_helper_hpd_irq_event(dev);
3836 	else
3837 		drm_kms_helper_hotplug_event(dev);
3838 #ifdef CONFIG_PM
3839 	dev->dev->power.disable_depth--;
3840 #endif
3841 	adev->in_suspend = false;
3842 
3843 	return 0;
3844 }
3845 
3846 /**
3847  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
3848  *
3849  * @adev: amdgpu_device pointer
3850  *
3851  * The list of all the hardware IPs that make up the asic is walked and
3852  * the check_soft_reset callbacks are run.  check_soft_reset determines
3853  * if the asic is still hung or not.
3854  * Returns true if any of the IPs are still in a hung state, false if not.
3855  */
3856 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
3857 {
3858 	int i;
3859 	bool asic_hang = false;
3860 
3861 	if (amdgpu_sriov_vf(adev))
3862 		return true;
3863 
3864 	if (amdgpu_asic_need_full_reset(adev))
3865 		return true;
3866 
3867 	for (i = 0; i < adev->num_ip_blocks; i++) {
3868 		if (!adev->ip_blocks[i].status.valid)
3869 			continue;
3870 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
3871 			adev->ip_blocks[i].status.hang =
3872 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
3873 		if (adev->ip_blocks[i].status.hang) {
3874 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
3875 			asic_hang = true;
3876 		}
3877 	}
3878 	return asic_hang;
3879 }
3880 
3881 /**
3882  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
3883  *
3884  * @adev: amdgpu_device pointer
3885  *
3886  * The list of all the hardware IPs that make up the asic is walked and the
3887  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
3888  * handles any IP specific hardware or software state changes that are
3889  * necessary for a soft reset to succeed.
3890  * Returns 0 on success, negative error code on failure.
3891  */
3892 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
3893 {
3894 	int i, r = 0;
3895 
3896 	for (i = 0; i < adev->num_ip_blocks; i++) {
3897 		if (!adev->ip_blocks[i].status.valid)
3898 			continue;
3899 		if (adev->ip_blocks[i].status.hang &&
3900 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
3901 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
3902 			if (r)
3903 				return r;
3904 		}
3905 	}
3906 
3907 	return 0;
3908 }
3909 
3910 /**
3911  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
3912  *
3913  * @adev: amdgpu_device pointer
3914  *
3915  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
3916  * reset is necessary to recover.
3917  * Returns true if a full asic reset is required, false if not.
3918  */
3919 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
3920 {
3921 	int i;
3922 
3923 	if (amdgpu_asic_need_full_reset(adev))
3924 		return true;
3925 
3926 	for (i = 0; i < adev->num_ip_blocks; i++) {
3927 		if (!adev->ip_blocks[i].status.valid)
3928 			continue;
3929 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
3930 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
3931 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3932 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
3933 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3934 			if (adev->ip_blocks[i].status.hang) {
3935 				dev_info(adev->dev, "Some block need full reset!\n");
3936 				return true;
3937 			}
3938 		}
3939 	}
3940 	return false;
3941 }
3942 
3943 /**
3944  * amdgpu_device_ip_soft_reset - do a soft reset
3945  *
3946  * @adev: amdgpu_device pointer
3947  *
3948  * The list of all the hardware IPs that make up the asic is walked and the
3949  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
3950  * IP specific hardware or software state changes that are necessary to soft
3951  * reset the IP.
3952  * Returns 0 on success, negative error code on failure.
3953  */
3954 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
3955 {
3956 	int i, r = 0;
3957 
3958 	for (i = 0; i < adev->num_ip_blocks; i++) {
3959 		if (!adev->ip_blocks[i].status.valid)
3960 			continue;
3961 		if (adev->ip_blocks[i].status.hang &&
3962 		    adev->ip_blocks[i].version->funcs->soft_reset) {
3963 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
3964 			if (r)
3965 				return r;
3966 		}
3967 	}
3968 
3969 	return 0;
3970 }
3971 
3972 /**
3973  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
3974  *
3975  * @adev: amdgpu_device pointer
3976  *
3977  * The list of all the hardware IPs that make up the asic is walked and the
3978  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
3979  * handles any IP specific hardware or software state changes that are
3980  * necessary after the IP has been soft reset.
3981  * Returns 0 on success, negative error code on failure.
3982  */
3983 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
3984 {
3985 	int i, r = 0;
3986 
3987 	for (i = 0; i < adev->num_ip_blocks; i++) {
3988 		if (!adev->ip_blocks[i].status.valid)
3989 			continue;
3990 		if (adev->ip_blocks[i].status.hang &&
3991 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
3992 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
3993 		if (r)
3994 			return r;
3995 	}
3996 
3997 	return 0;
3998 }
3999 
4000 /**
4001  * amdgpu_device_recover_vram - Recover some VRAM contents
4002  *
4003  * @adev: amdgpu_device pointer
4004  *
4005  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4006  * restore things like GPUVM page tables after a GPU reset where
4007  * the contents of VRAM might be lost.
4008  *
4009  * Returns:
4010  * 0 on success, negative error code on failure.
4011  */
4012 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4013 {
4014 	struct dma_fence *fence = NULL, *next = NULL;
4015 	struct amdgpu_bo *shadow;
4016 	long r = 1, tmo;
4017 
4018 	if (amdgpu_sriov_runtime(adev))
4019 		tmo = msecs_to_jiffies(8000);
4020 	else
4021 		tmo = msecs_to_jiffies(100);
4022 
4023 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4024 	mutex_lock(&adev->shadow_list_lock);
4025 	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
4026 
4027 		/* No need to recover an evicted BO */
4028 		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
4029 		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
4030 		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
4031 			continue;
4032 
4033 		r = amdgpu_bo_restore_shadow(shadow, &next);
4034 		if (r)
4035 			break;
4036 
4037 		if (fence) {
4038 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4039 			dma_fence_put(fence);
4040 			fence = next;
4041 			if (tmo == 0) {
4042 				r = -ETIMEDOUT;
4043 				break;
4044 			} else if (tmo < 0) {
4045 				r = tmo;
4046 				break;
4047 			}
4048 		} else {
4049 			fence = next;
4050 		}
4051 	}
4052 	mutex_unlock(&adev->shadow_list_lock);
4053 
4054 	if (fence)
4055 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4056 	dma_fence_put(fence);
4057 
4058 	if (r < 0 || tmo <= 0) {
4059 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4060 		return -EIO;
4061 	}
4062 
4063 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4064 	return 0;
4065 }
4066 
4067 
4068 /**
4069  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4070  *
4071  * @adev: amdgpu_device pointer
4072  * @from_hypervisor: request from hypervisor
4073  *
4074  * do VF FLR and reinitialize Asic
4075  * return 0 means succeeded otherwise failed
4076  */
4077 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4078 				     bool from_hypervisor)
4079 {
4080 	int r;
4081 
4082 	if (from_hypervisor)
4083 		r = amdgpu_virt_request_full_gpu(adev, true);
4084 	else
4085 		r = amdgpu_virt_reset_gpu(adev);
4086 	if (r)
4087 		return r;
4088 
4089 	amdgpu_amdkfd_pre_reset(adev);
4090 
4091 	/* Resume IP prior to SMC */
4092 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4093 	if (r)
4094 		goto error;
4095 
4096 	amdgpu_virt_init_data_exchange(adev);
4097 	/* we need recover gart prior to run SMC/CP/SDMA resume */
4098 	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4099 
4100 	r = amdgpu_device_fw_loading(adev);
4101 	if (r)
4102 		return r;
4103 
4104 	/* now we are okay to resume SMC/CP/SDMA */
4105 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4106 	if (r)
4107 		goto error;
4108 
4109 	amdgpu_irq_gpu_reset_resume_helper(adev);
4110 	r = amdgpu_ib_ring_tests(adev);
4111 	amdgpu_amdkfd_post_reset(adev);
4112 
4113 error:
4114 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4115 		amdgpu_inc_vram_lost(adev);
4116 		r = amdgpu_device_recover_vram(adev);
4117 	}
4118 	amdgpu_virt_release_full_gpu(adev, true);
4119 
4120 	return r;
4121 }
4122 
4123 /**
4124  * amdgpu_device_has_job_running - check if there is any job in mirror list
4125  *
4126  * @adev: amdgpu_device pointer
4127  *
4128  * check if there is any job in mirror list
4129  */
4130 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4131 {
4132 	int i;
4133 	struct drm_sched_job *job;
4134 
4135 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4136 		struct amdgpu_ring *ring = adev->rings[i];
4137 
4138 		if (!ring || !ring->sched.thread)
4139 			continue;
4140 
4141 		spin_lock(&ring->sched.job_list_lock);
4142 		job = list_first_entry_or_null(&ring->sched.pending_list,
4143 					       struct drm_sched_job, list);
4144 		spin_unlock(&ring->sched.job_list_lock);
4145 		if (job)
4146 			return true;
4147 	}
4148 	return false;
4149 }
4150 
4151 /**
4152  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4153  *
4154  * @adev: amdgpu_device pointer
4155  *
4156  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4157  * a hung GPU.
4158  */
4159 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4160 {
4161 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4162 		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4163 		return false;
4164 	}
4165 
4166 	if (amdgpu_gpu_recovery == 0)
4167 		goto disabled;
4168 
4169 	if (amdgpu_sriov_vf(adev))
4170 		return true;
4171 
4172 	if (amdgpu_gpu_recovery == -1) {
4173 		switch (adev->asic_type) {
4174 		case CHIP_BONAIRE:
4175 		case CHIP_HAWAII:
4176 		case CHIP_TOPAZ:
4177 		case CHIP_TONGA:
4178 		case CHIP_FIJI:
4179 		case CHIP_POLARIS10:
4180 		case CHIP_POLARIS11:
4181 		case CHIP_POLARIS12:
4182 		case CHIP_VEGAM:
4183 		case CHIP_VEGA20:
4184 		case CHIP_VEGA10:
4185 		case CHIP_VEGA12:
4186 		case CHIP_RAVEN:
4187 		case CHIP_ARCTURUS:
4188 		case CHIP_RENOIR:
4189 		case CHIP_NAVI10:
4190 		case CHIP_NAVI14:
4191 		case CHIP_NAVI12:
4192 		case CHIP_SIENNA_CICHLID:
4193 		case CHIP_NAVY_FLOUNDER:
4194 		case CHIP_DIMGREY_CAVEFISH:
4195 		case CHIP_VANGOGH:
4196 		case CHIP_ALDEBARAN:
4197 			break;
4198 		default:
4199 			goto disabled;
4200 		}
4201 	}
4202 
4203 	return true;
4204 
4205 disabled:
4206 		dev_info(adev->dev, "GPU recovery disabled.\n");
4207 		return false;
4208 }
4209 
4210 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4211 {
4212         u32 i;
4213         int ret = 0;
4214 
4215         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4216 
4217         dev_info(adev->dev, "GPU mode1 reset\n");
4218 
4219         /* disable BM */
4220         pci_clear_master(adev->pdev);
4221 
4222         amdgpu_device_cache_pci_state(adev->pdev);
4223 
4224         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4225                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4226                 ret = amdgpu_dpm_mode1_reset(adev);
4227         } else {
4228                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4229                 ret = psp_gpu_reset(adev);
4230         }
4231 
4232         if (ret)
4233                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4234 
4235         amdgpu_device_load_pci_state(adev->pdev);
4236 
4237         /* wait for asic to come out of reset */
4238         for (i = 0; i < adev->usec_timeout; i++) {
4239                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4240 
4241                 if (memsize != 0xffffffff)
4242                         break;
4243                 udelay(1);
4244         }
4245 
4246         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4247         return ret;
4248 }
4249 
4250 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4251 				 struct amdgpu_reset_context *reset_context)
4252 {
4253 	int i, r = 0;
4254 	struct amdgpu_job *job = NULL;
4255 	bool need_full_reset =
4256 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4257 
4258 	if (reset_context->reset_req_dev == adev)
4259 		job = reset_context->job;
4260 
4261 	/* no need to dump if device is not in good state during probe period */
4262 	if (!adev->gmc.xgmi.pending_reset)
4263 		amdgpu_debugfs_wait_dump(adev);
4264 
4265 	if (amdgpu_sriov_vf(adev)) {
4266 		/* stop the data exchange thread */
4267 		amdgpu_virt_fini_data_exchange(adev);
4268 	}
4269 
4270 	/* block all schedulers and reset given job's ring */
4271 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4272 		struct amdgpu_ring *ring = adev->rings[i];
4273 
4274 		if (!ring || !ring->sched.thread)
4275 			continue;
4276 
4277 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4278 		amdgpu_fence_driver_force_completion(ring);
4279 	}
4280 
4281 	if(job)
4282 		drm_sched_increase_karma(&job->base);
4283 
4284 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4285 	/* If reset handler not implemented, continue; otherwise return */
4286 	if (r == -ENOSYS)
4287 		r = 0;
4288 	else
4289 		return r;
4290 
4291 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4292 	if (!amdgpu_sriov_vf(adev)) {
4293 
4294 		if (!need_full_reset)
4295 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4296 
4297 		if (!need_full_reset) {
4298 			amdgpu_device_ip_pre_soft_reset(adev);
4299 			r = amdgpu_device_ip_soft_reset(adev);
4300 			amdgpu_device_ip_post_soft_reset(adev);
4301 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4302 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4303 				need_full_reset = true;
4304 			}
4305 		}
4306 
4307 		if (need_full_reset)
4308 			r = amdgpu_device_ip_suspend(adev);
4309 		if (need_full_reset)
4310 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4311 		else
4312 			clear_bit(AMDGPU_NEED_FULL_RESET,
4313 				  &reset_context->flags);
4314 	}
4315 
4316 	return r;
4317 }
4318 
4319 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4320 			 struct amdgpu_reset_context *reset_context)
4321 {
4322 	struct amdgpu_device *tmp_adev = NULL;
4323 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4324 	int r = 0;
4325 
4326 	/* Try reset handler method first */
4327 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4328 				    reset_list);
4329 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4330 	/* If reset handler not implemented, continue; otherwise return */
4331 	if (r == -ENOSYS)
4332 		r = 0;
4333 	else
4334 		return r;
4335 
4336 	/* Reset handler not implemented, use the default method */
4337 	need_full_reset =
4338 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4339 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4340 
4341 	/*
4342 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4343 	 * to allow proper links negotiation in FW (within 1 sec)
4344 	 */
4345 	if (!skip_hw_reset && need_full_reset) {
4346 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4347 			/* For XGMI run all resets in parallel to speed up the process */
4348 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4349 				tmp_adev->gmc.xgmi.pending_reset = false;
4350 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4351 					r = -EALREADY;
4352 			} else
4353 				r = amdgpu_asic_reset(tmp_adev);
4354 
4355 			if (r) {
4356 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4357 					 r, adev_to_drm(tmp_adev)->unique);
4358 				break;
4359 			}
4360 		}
4361 
4362 		/* For XGMI wait for all resets to complete before proceed */
4363 		if (!r) {
4364 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4365 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4366 					flush_work(&tmp_adev->xgmi_reset_work);
4367 					r = tmp_adev->asic_reset_res;
4368 					if (r)
4369 						break;
4370 				}
4371 			}
4372 		}
4373 	}
4374 
4375 	if (!r && amdgpu_ras_intr_triggered()) {
4376 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4377 			if (tmp_adev->mmhub.ras_funcs &&
4378 			    tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4379 				tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4380 		}
4381 
4382 		amdgpu_ras_intr_cleared();
4383 	}
4384 
4385 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4386 		if (need_full_reset) {
4387 			/* post card */
4388 			r = amdgpu_device_asic_init(tmp_adev);
4389 			if (r) {
4390 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4391 			} else {
4392 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4393 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4394 				if (r)
4395 					goto out;
4396 
4397 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4398 				if (vram_lost) {
4399 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4400 					amdgpu_inc_vram_lost(tmp_adev);
4401 				}
4402 
4403 				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4404 				if (r)
4405 					goto out;
4406 
4407 				r = amdgpu_device_fw_loading(tmp_adev);
4408 				if (r)
4409 					return r;
4410 
4411 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4412 				if (r)
4413 					goto out;
4414 
4415 				if (vram_lost)
4416 					amdgpu_device_fill_reset_magic(tmp_adev);
4417 
4418 				/*
4419 				 * Add this ASIC as tracked as reset was already
4420 				 * complete successfully.
4421 				 */
4422 				amdgpu_register_gpu_instance(tmp_adev);
4423 
4424 				if (!reset_context->hive &&
4425 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4426 					amdgpu_xgmi_add_device(tmp_adev);
4427 
4428 				r = amdgpu_device_ip_late_init(tmp_adev);
4429 				if (r)
4430 					goto out;
4431 
4432 				amdgpu_fbdev_set_suspend(tmp_adev, 0);
4433 
4434 				/*
4435 				 * The GPU enters bad state once faulty pages
4436 				 * by ECC has reached the threshold, and ras
4437 				 * recovery is scheduled next. So add one check
4438 				 * here to break recovery if it indeed exceeds
4439 				 * bad page threshold, and remind user to
4440 				 * retire this GPU or setting one bigger
4441 				 * bad_page_threshold value to fix this once
4442 				 * probing driver again.
4443 				 */
4444 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4445 					/* must succeed. */
4446 					amdgpu_ras_resume(tmp_adev);
4447 				} else {
4448 					r = -EINVAL;
4449 					goto out;
4450 				}
4451 
4452 				/* Update PSP FW topology after reset */
4453 				if (reset_context->hive &&
4454 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4455 					r = amdgpu_xgmi_update_topology(
4456 						reset_context->hive, tmp_adev);
4457 			}
4458 		}
4459 
4460 out:
4461 		if (!r) {
4462 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4463 			r = amdgpu_ib_ring_tests(tmp_adev);
4464 			if (r) {
4465 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4466 				r = amdgpu_device_ip_suspend(tmp_adev);
4467 				need_full_reset = true;
4468 				r = -EAGAIN;
4469 				goto end;
4470 			}
4471 		}
4472 
4473 		if (!r)
4474 			r = amdgpu_device_recover_vram(tmp_adev);
4475 		else
4476 			tmp_adev->asic_reset_res = r;
4477 	}
4478 
4479 end:
4480 	if (need_full_reset)
4481 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4482 	else
4483 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4484 	return r;
4485 }
4486 
4487 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4488 				struct amdgpu_hive_info *hive)
4489 {
4490 	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4491 		return false;
4492 
4493 	if (hive) {
4494 		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4495 	} else {
4496 		down_write(&adev->reset_sem);
4497 	}
4498 
4499 	switch (amdgpu_asic_reset_method(adev)) {
4500 	case AMD_RESET_METHOD_MODE1:
4501 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4502 		break;
4503 	case AMD_RESET_METHOD_MODE2:
4504 		adev->mp1_state = PP_MP1_STATE_RESET;
4505 		break;
4506 	default:
4507 		adev->mp1_state = PP_MP1_STATE_NONE;
4508 		break;
4509 	}
4510 
4511 	return true;
4512 }
4513 
4514 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4515 {
4516 	amdgpu_vf_error_trans_all(adev);
4517 	adev->mp1_state = PP_MP1_STATE_NONE;
4518 	atomic_set(&adev->in_gpu_reset, 0);
4519 	up_write(&adev->reset_sem);
4520 }
4521 
4522 /*
4523  * to lockup a list of amdgpu devices in a hive safely, if not a hive
4524  * with multiple nodes, it will be similar as amdgpu_device_lock_adev.
4525  *
4526  * unlock won't require roll back.
4527  */
4528 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4529 {
4530 	struct amdgpu_device *tmp_adev = NULL;
4531 
4532 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4533 		if (!hive) {
4534 			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4535 			return -ENODEV;
4536 		}
4537 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4538 			if (!amdgpu_device_lock_adev(tmp_adev, hive))
4539 				goto roll_back;
4540 		}
4541 	} else if (!amdgpu_device_lock_adev(adev, hive))
4542 		return -EAGAIN;
4543 
4544 	return 0;
4545 roll_back:
4546 	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4547 		/*
4548 		 * if the lockup iteration break in the middle of a hive,
4549 		 * it may means there may has a race issue,
4550 		 * or a hive device locked up independently.
4551 		 * we may be in trouble and may not, so will try to roll back
4552 		 * the lock and give out a warnning.
4553 		 */
4554 		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4555 		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4556 			amdgpu_device_unlock_adev(tmp_adev);
4557 		}
4558 	}
4559 	return -EAGAIN;
4560 }
4561 
4562 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4563 {
4564 	struct pci_dev *p = NULL;
4565 
4566 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4567 			adev->pdev->bus->number, 1);
4568 	if (p) {
4569 		pm_runtime_enable(&(p->dev));
4570 		pm_runtime_resume(&(p->dev));
4571 	}
4572 }
4573 
4574 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4575 {
4576 	enum amd_reset_method reset_method;
4577 	struct pci_dev *p = NULL;
4578 	u64 expires;
4579 
4580 	/*
4581 	 * For now, only BACO and mode1 reset are confirmed
4582 	 * to suffer the audio issue without proper suspended.
4583 	 */
4584 	reset_method = amdgpu_asic_reset_method(adev);
4585 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4586 	     (reset_method != AMD_RESET_METHOD_MODE1))
4587 		return -EINVAL;
4588 
4589 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4590 			adev->pdev->bus->number, 1);
4591 	if (!p)
4592 		return -ENODEV;
4593 
4594 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4595 	if (!expires)
4596 		/*
4597 		 * If we cannot get the audio device autosuspend delay,
4598 		 * a fixed 4S interval will be used. Considering 3S is
4599 		 * the audio controller default autosuspend delay setting.
4600 		 * 4S used here is guaranteed to cover that.
4601 		 */
4602 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4603 
4604 	while (!pm_runtime_status_suspended(&(p->dev))) {
4605 		if (!pm_runtime_suspend(&(p->dev)))
4606 			break;
4607 
4608 		if (expires < ktime_get_mono_fast_ns()) {
4609 			dev_warn(adev->dev, "failed to suspend display audio\n");
4610 			/* TODO: abort the succeeding gpu reset? */
4611 			return -ETIMEDOUT;
4612 		}
4613 	}
4614 
4615 	pm_runtime_disable(&(p->dev));
4616 
4617 	return 0;
4618 }
4619 
4620 void amdgpu_device_recheck_guilty_jobs(
4621 	struct amdgpu_device *adev, struct list_head *device_list_handle,
4622 	struct amdgpu_reset_context *reset_context)
4623 {
4624 	int i, r = 0;
4625 
4626 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4627 		struct amdgpu_ring *ring = adev->rings[i];
4628 		int ret = 0;
4629 		struct drm_sched_job *s_job;
4630 
4631 		if (!ring || !ring->sched.thread)
4632 			continue;
4633 
4634 		s_job = list_first_entry_or_null(&ring->sched.pending_list,
4635 				struct drm_sched_job, list);
4636 		if (s_job == NULL)
4637 			continue;
4638 
4639 		/* clear job's guilty and depend the folowing step to decide the real one */
4640 		drm_sched_reset_karma(s_job);
4641 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4642 
4643 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4644 		if (ret == 0) { /* timeout */
4645 			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4646 						ring->sched.name, s_job->id);
4647 
4648 			/* set guilty */
4649 			drm_sched_increase_karma(s_job);
4650 retry:
4651 			/* do hw reset */
4652 			if (amdgpu_sriov_vf(adev)) {
4653 				amdgpu_virt_fini_data_exchange(adev);
4654 				r = amdgpu_device_reset_sriov(adev, false);
4655 				if (r)
4656 					adev->asic_reset_res = r;
4657 			} else {
4658 				clear_bit(AMDGPU_SKIP_HW_RESET,
4659 					  &reset_context->flags);
4660 				r = amdgpu_do_asic_reset(device_list_handle,
4661 							 reset_context);
4662 				if (r && r == -EAGAIN)
4663 					goto retry;
4664 			}
4665 
4666 			/*
4667 			 * add reset counter so that the following
4668 			 * resubmitted job could flush vmid
4669 			 */
4670 			atomic_inc(&adev->gpu_reset_counter);
4671 			continue;
4672 		}
4673 
4674 		/* got the hw fence, signal finished fence */
4675 		atomic_dec(ring->sched.score);
4676 		dma_fence_get(&s_job->s_fence->finished);
4677 		dma_fence_signal(&s_job->s_fence->finished);
4678 		dma_fence_put(&s_job->s_fence->finished);
4679 
4680 		/* remove node from list and free the job */
4681 		spin_lock(&ring->sched.job_list_lock);
4682 		list_del_init(&s_job->list);
4683 		spin_unlock(&ring->sched.job_list_lock);
4684 		ring->sched.ops->free_job(s_job);
4685 	}
4686 }
4687 
4688 /**
4689  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4690  *
4691  * @adev: amdgpu_device pointer
4692  * @job: which job trigger hang
4693  *
4694  * Attempt to reset the GPU if it has hung (all asics).
4695  * Attempt to do soft-reset or full-reset and reinitialize Asic
4696  * Returns 0 for success or an error on failure.
4697  */
4698 
4699 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4700 			      struct amdgpu_job *job)
4701 {
4702 	struct list_head device_list, *device_list_handle =  NULL;
4703 	bool job_signaled = false;
4704 	struct amdgpu_hive_info *hive = NULL;
4705 	struct amdgpu_device *tmp_adev = NULL;
4706 	int i, r = 0;
4707 	bool need_emergency_restart = false;
4708 	bool audio_suspended = false;
4709 	int tmp_vram_lost_counter;
4710 	struct amdgpu_reset_context reset_context;
4711 
4712 	memset(&reset_context, 0, sizeof(reset_context));
4713 
4714 	/*
4715 	 * Special case: RAS triggered and full reset isn't supported
4716 	 */
4717 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4718 
4719 	/*
4720 	 * Flush RAM to disk so that after reboot
4721 	 * the user can read log and see why the system rebooted.
4722 	 */
4723 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4724 		DRM_WARN("Emergency reboot.");
4725 
4726 		ksys_sync_helper();
4727 		emergency_restart();
4728 	}
4729 
4730 	dev_info(adev->dev, "GPU %s begin!\n",
4731 		need_emergency_restart ? "jobs stop":"reset");
4732 
4733 	/*
4734 	 * Here we trylock to avoid chain of resets executing from
4735 	 * either trigger by jobs on different adevs in XGMI hive or jobs on
4736 	 * different schedulers for same device while this TO handler is running.
4737 	 * We always reset all schedulers for device and all devices for XGMI
4738 	 * hive so that should take care of them too.
4739 	 */
4740 	hive = amdgpu_get_xgmi_hive(adev);
4741 	if (hive) {
4742 		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4743 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4744 				job ? job->base.id : -1, hive->hive_id);
4745 			amdgpu_put_xgmi_hive(hive);
4746 			if (job)
4747 				drm_sched_increase_karma(&job->base);
4748 			return 0;
4749 		}
4750 		mutex_lock(&hive->hive_lock);
4751 	}
4752 
4753 	reset_context.method = AMD_RESET_METHOD_NONE;
4754 	reset_context.reset_req_dev = adev;
4755 	reset_context.job = job;
4756 	reset_context.hive = hive;
4757 	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4758 
4759 	/*
4760 	 * lock the device before we try to operate the linked list
4761 	 * if didn't get the device lock, don't touch the linked list since
4762 	 * others may iterating it.
4763 	 */
4764 	r = amdgpu_device_lock_hive_adev(adev, hive);
4765 	if (r) {
4766 		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4767 					job ? job->base.id : -1);
4768 
4769 		/* even we skipped this reset, still need to set the job to guilty */
4770 		if (job)
4771 			drm_sched_increase_karma(&job->base);
4772 		goto skip_recovery;
4773 	}
4774 
4775 	/*
4776 	 * Build list of devices to reset.
4777 	 * In case we are in XGMI hive mode, resort the device list
4778 	 * to put adev in the 1st position.
4779 	 */
4780 	INIT_LIST_HEAD(&device_list);
4781 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4782 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
4783 			list_add_tail(&tmp_adev->reset_list, &device_list);
4784 		if (!list_is_first(&adev->reset_list, &device_list))
4785 			list_rotate_to_front(&adev->reset_list, &device_list);
4786 		device_list_handle = &device_list;
4787 	} else {
4788 		list_add_tail(&adev->reset_list, &device_list);
4789 		device_list_handle = &device_list;
4790 	}
4791 
4792 	/* block all schedulers and reset given job's ring */
4793 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4794 		/*
4795 		 * Try to put the audio codec into suspend state
4796 		 * before gpu reset started.
4797 		 *
4798 		 * Due to the power domain of the graphics device
4799 		 * is shared with AZ power domain. Without this,
4800 		 * we may change the audio hardware from behind
4801 		 * the audio driver's back. That will trigger
4802 		 * some audio codec errors.
4803 		 */
4804 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
4805 			audio_suspended = true;
4806 
4807 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
4808 
4809 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
4810 
4811 		if (!amdgpu_sriov_vf(tmp_adev))
4812 			amdgpu_amdkfd_pre_reset(tmp_adev);
4813 
4814 		/*
4815 		 * Mark these ASICs to be reseted as untracked first
4816 		 * And add them back after reset completed
4817 		 */
4818 		amdgpu_unregister_gpu_instance(tmp_adev);
4819 
4820 		amdgpu_fbdev_set_suspend(tmp_adev, 1);
4821 
4822 		/* disable ras on ALL IPs */
4823 		if (!need_emergency_restart &&
4824 		      amdgpu_device_ip_need_full_reset(tmp_adev))
4825 			amdgpu_ras_suspend(tmp_adev);
4826 
4827 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4828 			struct amdgpu_ring *ring = tmp_adev->rings[i];
4829 
4830 			if (!ring || !ring->sched.thread)
4831 				continue;
4832 
4833 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
4834 
4835 			if (need_emergency_restart)
4836 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
4837 		}
4838 		atomic_inc(&tmp_adev->gpu_reset_counter);
4839 	}
4840 
4841 	if (need_emergency_restart)
4842 		goto skip_sched_resume;
4843 
4844 	/*
4845 	 * Must check guilty signal here since after this point all old
4846 	 * HW fences are force signaled.
4847 	 *
4848 	 * job->base holds a reference to parent fence
4849 	 */
4850 	if (job && job->base.s_fence->parent &&
4851 	    dma_fence_is_signaled(job->base.s_fence->parent)) {
4852 		job_signaled = true;
4853 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
4854 		goto skip_hw_reset;
4855 	}
4856 
4857 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
4858 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4859 		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
4860 		/*TODO Should we stop ?*/
4861 		if (r) {
4862 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
4863 				  r, adev_to_drm(tmp_adev)->unique);
4864 			tmp_adev->asic_reset_res = r;
4865 		}
4866 	}
4867 
4868 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
4869 	/* Actual ASIC resets if needed.*/
4870 	/* TODO Implement XGMI hive reset logic for SRIOV */
4871 	if (amdgpu_sriov_vf(adev)) {
4872 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
4873 		if (r)
4874 			adev->asic_reset_res = r;
4875 	} else {
4876 		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
4877 		if (r && r == -EAGAIN)
4878 			goto retry;
4879 	}
4880 
4881 skip_hw_reset:
4882 
4883 	/* Post ASIC reset for all devs .*/
4884 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4885 
4886 		/*
4887 		 * Sometimes a later bad compute job can block a good gfx job as gfx
4888 		 * and compute ring share internal GC HW mutually. We add an additional
4889 		 * guilty jobs recheck step to find the real guilty job, it synchronously
4890 		 * submits and pends for the first job being signaled. If it gets timeout,
4891 		 * we identify it as a real guilty job.
4892 		 */
4893 		if (amdgpu_gpu_recovery == 2 &&
4894 			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
4895 			amdgpu_device_recheck_guilty_jobs(
4896 				tmp_adev, device_list_handle, &reset_context);
4897 
4898 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4899 			struct amdgpu_ring *ring = tmp_adev->rings[i];
4900 
4901 			if (!ring || !ring->sched.thread)
4902 				continue;
4903 
4904 			/* No point to resubmit jobs if we didn't HW reset*/
4905 			if (!tmp_adev->asic_reset_res && !job_signaled)
4906 				drm_sched_resubmit_jobs(&ring->sched);
4907 
4908 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
4909 		}
4910 
4911 		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
4912 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
4913 		}
4914 
4915 		tmp_adev->asic_reset_res = 0;
4916 
4917 		if (r) {
4918 			/* bad news, how to tell it to userspace ? */
4919 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
4920 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
4921 		} else {
4922 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
4923 		}
4924 	}
4925 
4926 skip_sched_resume:
4927 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4928 		/* unlock kfd: SRIOV would do it separately */
4929 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
4930 	                amdgpu_amdkfd_post_reset(tmp_adev);
4931 
4932 		/* kfd_post_reset will do nothing if kfd device is not initialized,
4933 		 * need to bring up kfd here if it's not be initialized before
4934 		 */
4935 		if (!adev->kfd.init_complete)
4936 			amdgpu_amdkfd_device_init(adev);
4937 
4938 		if (audio_suspended)
4939 			amdgpu_device_resume_display_audio(tmp_adev);
4940 		amdgpu_device_unlock_adev(tmp_adev);
4941 	}
4942 
4943 skip_recovery:
4944 	if (hive) {
4945 		atomic_set(&hive->in_reset, 0);
4946 		mutex_unlock(&hive->hive_lock);
4947 		amdgpu_put_xgmi_hive(hive);
4948 	}
4949 
4950 	if (r && r != -EAGAIN)
4951 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
4952 	return r;
4953 }
4954 
4955 /**
4956  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
4957  *
4958  * @adev: amdgpu_device pointer
4959  *
4960  * Fetchs and stores in the driver the PCIE capabilities (gen speed
4961  * and lanes) of the slot the device is in. Handles APUs and
4962  * virtualized environments where PCIE config space may not be available.
4963  */
4964 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
4965 {
4966 	struct pci_dev *pdev;
4967 	enum pci_bus_speed speed_cap, platform_speed_cap;
4968 	enum pcie_link_width platform_link_width;
4969 
4970 	if (amdgpu_pcie_gen_cap)
4971 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
4972 
4973 	if (amdgpu_pcie_lane_cap)
4974 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
4975 
4976 	/* covers APUs as well */
4977 	if (pci_is_root_bus(adev->pdev->bus)) {
4978 		if (adev->pm.pcie_gen_mask == 0)
4979 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4980 		if (adev->pm.pcie_mlw_mask == 0)
4981 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4982 		return;
4983 	}
4984 
4985 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
4986 		return;
4987 
4988 	pcie_bandwidth_available(adev->pdev, NULL,
4989 				 &platform_speed_cap, &platform_link_width);
4990 
4991 	if (adev->pm.pcie_gen_mask == 0) {
4992 		/* asic caps */
4993 		pdev = adev->pdev;
4994 		speed_cap = pcie_get_speed_cap(pdev);
4995 		if (speed_cap == PCI_SPEED_UNKNOWN) {
4996 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
4997 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
4998 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
4999 		} else {
5000 			if (speed_cap == PCIE_SPEED_32_0GT)
5001 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5002 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5003 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5004 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5005 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5006 			else if (speed_cap == PCIE_SPEED_16_0GT)
5007 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5008 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5009 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5010 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5011 			else if (speed_cap == PCIE_SPEED_8_0GT)
5012 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5013 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5014 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5015 			else if (speed_cap == PCIE_SPEED_5_0GT)
5016 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5017 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5018 			else
5019 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5020 		}
5021 		/* platform caps */
5022 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5023 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5024 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5025 		} else {
5026 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5027 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5028 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5029 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5030 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5031 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5032 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5033 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5034 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5035 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5036 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5037 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5038 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5039 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5040 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5041 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5042 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5043 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5044 			else
5045 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5046 
5047 		}
5048 	}
5049 	if (adev->pm.pcie_mlw_mask == 0) {
5050 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5051 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5052 		} else {
5053 			switch (platform_link_width) {
5054 			case PCIE_LNK_X32:
5055 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5056 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5057 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5058 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5059 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5060 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5061 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5062 				break;
5063 			case PCIE_LNK_X16:
5064 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5065 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5066 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5067 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5068 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5069 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5070 				break;
5071 			case PCIE_LNK_X12:
5072 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5073 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5074 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5075 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5076 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5077 				break;
5078 			case PCIE_LNK_X8:
5079 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5080 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5081 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5082 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5083 				break;
5084 			case PCIE_LNK_X4:
5085 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5086 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5087 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5088 				break;
5089 			case PCIE_LNK_X2:
5090 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5091 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5092 				break;
5093 			case PCIE_LNK_X1:
5094 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5095 				break;
5096 			default:
5097 				break;
5098 			}
5099 		}
5100 	}
5101 }
5102 
5103 int amdgpu_device_baco_enter(struct drm_device *dev)
5104 {
5105 	struct amdgpu_device *adev = drm_to_adev(dev);
5106 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5107 
5108 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5109 		return -ENOTSUPP;
5110 
5111 	if (ras && adev->ras_enabled &&
5112 	    adev->nbio.funcs->enable_doorbell_interrupt)
5113 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5114 
5115 	return amdgpu_dpm_baco_enter(adev);
5116 }
5117 
5118 int amdgpu_device_baco_exit(struct drm_device *dev)
5119 {
5120 	struct amdgpu_device *adev = drm_to_adev(dev);
5121 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5122 	int ret = 0;
5123 
5124 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5125 		return -ENOTSUPP;
5126 
5127 	ret = amdgpu_dpm_baco_exit(adev);
5128 	if (ret)
5129 		return ret;
5130 
5131 	if (ras && adev->ras_enabled &&
5132 	    adev->nbio.funcs->enable_doorbell_interrupt)
5133 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5134 
5135 	return 0;
5136 }
5137 
5138 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5139 {
5140 	int i;
5141 
5142 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5143 		struct amdgpu_ring *ring = adev->rings[i];
5144 
5145 		if (!ring || !ring->sched.thread)
5146 			continue;
5147 
5148 		cancel_delayed_work_sync(&ring->sched.work_tdr);
5149 	}
5150 }
5151 
5152 /**
5153  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5154  * @pdev: PCI device struct
5155  * @state: PCI channel state
5156  *
5157  * Description: Called when a PCI error is detected.
5158  *
5159  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5160  */
5161 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5162 {
5163 	struct drm_device *dev = pci_get_drvdata(pdev);
5164 	struct amdgpu_device *adev = drm_to_adev(dev);
5165 	int i;
5166 
5167 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5168 
5169 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5170 		DRM_WARN("No support for XGMI hive yet...");
5171 		return PCI_ERS_RESULT_DISCONNECT;
5172 	}
5173 
5174 	switch (state) {
5175 	case pci_channel_io_normal:
5176 		return PCI_ERS_RESULT_CAN_RECOVER;
5177 	/* Fatal error, prepare for slot reset */
5178 	case pci_channel_io_frozen:
5179 		/*
5180 		 * Cancel and wait for all TDRs in progress if failing to
5181 		 * set  adev->in_gpu_reset in amdgpu_device_lock_adev
5182 		 *
5183 		 * Locking adev->reset_sem will prevent any external access
5184 		 * to GPU during PCI error recovery
5185 		 */
5186 		while (!amdgpu_device_lock_adev(adev, NULL))
5187 			amdgpu_cancel_all_tdr(adev);
5188 
5189 		/*
5190 		 * Block any work scheduling as we do for regular GPU reset
5191 		 * for the duration of the recovery
5192 		 */
5193 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5194 			struct amdgpu_ring *ring = adev->rings[i];
5195 
5196 			if (!ring || !ring->sched.thread)
5197 				continue;
5198 
5199 			drm_sched_stop(&ring->sched, NULL);
5200 		}
5201 		atomic_inc(&adev->gpu_reset_counter);
5202 		return PCI_ERS_RESULT_NEED_RESET;
5203 	case pci_channel_io_perm_failure:
5204 		/* Permanent error, prepare for device removal */
5205 		return PCI_ERS_RESULT_DISCONNECT;
5206 	}
5207 
5208 	return PCI_ERS_RESULT_NEED_RESET;
5209 }
5210 
5211 /**
5212  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5213  * @pdev: pointer to PCI device
5214  */
5215 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5216 {
5217 
5218 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5219 
5220 	/* TODO - dump whatever for debugging purposes */
5221 
5222 	/* This called only if amdgpu_pci_error_detected returns
5223 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5224 	 * works, no need to reset slot.
5225 	 */
5226 
5227 	return PCI_ERS_RESULT_RECOVERED;
5228 }
5229 
5230 /**
5231  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5232  * @pdev: PCI device struct
5233  *
5234  * Description: This routine is called by the pci error recovery
5235  * code after the PCI slot has been reset, just before we
5236  * should resume normal operations.
5237  */
5238 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5239 {
5240 	struct drm_device *dev = pci_get_drvdata(pdev);
5241 	struct amdgpu_device *adev = drm_to_adev(dev);
5242 	int r, i;
5243 	struct amdgpu_reset_context reset_context;
5244 	u32 memsize;
5245 	struct list_head device_list;
5246 
5247 	DRM_INFO("PCI error: slot reset callback!!\n");
5248 
5249 	memset(&reset_context, 0, sizeof(reset_context));
5250 
5251 	INIT_LIST_HEAD(&device_list);
5252 	list_add_tail(&adev->reset_list, &device_list);
5253 
5254 	/* wait for asic to come out of reset */
5255 	msleep(500);
5256 
5257 	/* Restore PCI confspace */
5258 	amdgpu_device_load_pci_state(pdev);
5259 
5260 	/* confirm  ASIC came out of reset */
5261 	for (i = 0; i < adev->usec_timeout; i++) {
5262 		memsize = amdgpu_asic_get_config_memsize(adev);
5263 
5264 		if (memsize != 0xffffffff)
5265 			break;
5266 		udelay(1);
5267 	}
5268 	if (memsize == 0xffffffff) {
5269 		r = -ETIME;
5270 		goto out;
5271 	}
5272 
5273 	reset_context.method = AMD_RESET_METHOD_NONE;
5274 	reset_context.reset_req_dev = adev;
5275 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5276 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5277 
5278 	adev->in_pci_err_recovery = true;
5279 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5280 	adev->in_pci_err_recovery = false;
5281 	if (r)
5282 		goto out;
5283 
5284 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5285 
5286 out:
5287 	if (!r) {
5288 		if (amdgpu_device_cache_pci_state(adev->pdev))
5289 			pci_restore_state(adev->pdev);
5290 
5291 		DRM_INFO("PCIe error recovery succeeded\n");
5292 	} else {
5293 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5294 		amdgpu_device_unlock_adev(adev);
5295 	}
5296 
5297 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5298 }
5299 
5300 /**
5301  * amdgpu_pci_resume() - resume normal ops after PCI reset
5302  * @pdev: pointer to PCI device
5303  *
5304  * Called when the error recovery driver tells us that its
5305  * OK to resume normal operation.
5306  */
5307 void amdgpu_pci_resume(struct pci_dev *pdev)
5308 {
5309 	struct drm_device *dev = pci_get_drvdata(pdev);
5310 	struct amdgpu_device *adev = drm_to_adev(dev);
5311 	int i;
5312 
5313 
5314 	DRM_INFO("PCI error: resume callback!!\n");
5315 
5316 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5317 		struct amdgpu_ring *ring = adev->rings[i];
5318 
5319 		if (!ring || !ring->sched.thread)
5320 			continue;
5321 
5322 
5323 		drm_sched_resubmit_jobs(&ring->sched);
5324 		drm_sched_start(&ring->sched, true);
5325 	}
5326 
5327 	amdgpu_device_unlock_adev(adev);
5328 }
5329 
5330 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5331 {
5332 	struct drm_device *dev = pci_get_drvdata(pdev);
5333 	struct amdgpu_device *adev = drm_to_adev(dev);
5334 	int r;
5335 
5336 	r = pci_save_state(pdev);
5337 	if (!r) {
5338 		kfree(adev->pci_state);
5339 
5340 		adev->pci_state = pci_store_saved_state(pdev);
5341 
5342 		if (!adev->pci_state) {
5343 			DRM_ERROR("Failed to store PCI saved state");
5344 			return false;
5345 		}
5346 	} else {
5347 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5348 		return false;
5349 	}
5350 
5351 	return true;
5352 }
5353 
5354 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5355 {
5356 	struct drm_device *dev = pci_get_drvdata(pdev);
5357 	struct amdgpu_device *adev = drm_to_adev(dev);
5358 	int r;
5359 
5360 	if (!adev->pci_state)
5361 		return false;
5362 
5363 	r = pci_load_saved_state(pdev, adev->pci_state);
5364 
5365 	if (!r) {
5366 		pci_restore_state(pdev);
5367 	} else {
5368 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5369 		return false;
5370 	}
5371 
5372 	return true;
5373 }
5374 
5375 
5376