1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
39 
40 #include <drm/drm_aperture.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_crtc_helper.h>
43 #include <drm/drm_fb_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/vgaarb.h>
47 #include <linux/vga_switcheroo.h>
48 #include <linux/efi.h>
49 #include "amdgpu.h"
50 #include "amdgpu_trace.h"
51 #include "amdgpu_i2c.h"
52 #include "atom.h"
53 #include "amdgpu_atombios.h"
54 #include "amdgpu_atomfirmware.h"
55 #include "amd_pcie.h"
56 #ifdef CONFIG_DRM_AMDGPU_SI
57 #include "si.h"
58 #endif
59 #ifdef CONFIG_DRM_AMDGPU_CIK
60 #include "cik.h"
61 #endif
62 #include "vi.h"
63 #include "soc15.h"
64 #include "nv.h"
65 #include "bif/bif_4_1_d.h"
66 #include <linux/firmware.h>
67 #include "amdgpu_vf_error.h"
68 
69 #include "amdgpu_amdkfd.h"
70 #include "amdgpu_pm.h"
71 
72 #include "amdgpu_xgmi.h"
73 #include "amdgpu_ras.h"
74 #include "amdgpu_pmu.h"
75 #include "amdgpu_fru_eeprom.h"
76 #include "amdgpu_reset.h"
77 
78 #include <linux/suspend.h>
79 #include <drm/task_barrier.h>
80 #include <linux/pm_runtime.h>
81 
82 #include <drm/drm_drv.h>
83 
84 #if IS_ENABLED(CONFIG_X86)
85 #include <asm/intel-family.h>
86 #endif
87 
88 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
89 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
90 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
91 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
92 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
93 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
95 
96 #define AMDGPU_RESUME_MS		2000
97 #define AMDGPU_MAX_RETRY_LIMIT		2
98 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
99 
100 static const struct drm_driver amdgpu_kms_driver;
101 
102 const char *amdgpu_asic_name[] = {
103 	"TAHITI",
104 	"PITCAIRN",
105 	"VERDE",
106 	"OLAND",
107 	"HAINAN",
108 	"BONAIRE",
109 	"KAVERI",
110 	"KABINI",
111 	"HAWAII",
112 	"MULLINS",
113 	"TOPAZ",
114 	"TONGA",
115 	"FIJI",
116 	"CARRIZO",
117 	"STONEY",
118 	"POLARIS10",
119 	"POLARIS11",
120 	"POLARIS12",
121 	"VEGAM",
122 	"VEGA10",
123 	"VEGA12",
124 	"VEGA20",
125 	"RAVEN",
126 	"ARCTURUS",
127 	"RENOIR",
128 	"ALDEBARAN",
129 	"NAVI10",
130 	"CYAN_SKILLFISH",
131 	"NAVI14",
132 	"NAVI12",
133 	"SIENNA_CICHLID",
134 	"NAVY_FLOUNDER",
135 	"VANGOGH",
136 	"DIMGREY_CAVEFISH",
137 	"BEIGE_GOBY",
138 	"YELLOW_CARP",
139 	"IP DISCOVERY",
140 	"LAST",
141 };
142 
143 /**
144  * DOC: pcie_replay_count
145  *
146  * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
150  */
151 
152 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
153 		struct device_attribute *attr, char *buf)
154 {
155 	struct drm_device *ddev = dev_get_drvdata(dev);
156 	struct amdgpu_device *adev = drm_to_adev(ddev);
157 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
158 
159 	return sysfs_emit(buf, "%llu\n", cnt);
160 }
161 
162 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
163 		amdgpu_device_get_pcie_replay_count, NULL);
164 
165 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
166 
167 /**
168  * DOC: product_name
169  *
170  * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards.
175  */
176 
177 static ssize_t amdgpu_device_get_product_name(struct device *dev,
178 		struct device_attribute *attr, char *buf)
179 {
180 	struct drm_device *ddev = dev_get_drvdata(dev);
181 	struct amdgpu_device *adev = drm_to_adev(ddev);
182 
183 	return sysfs_emit(buf, "%s\n", adev->product_name);
184 }
185 
186 static DEVICE_ATTR(product_name, S_IRUGO,
187 		amdgpu_device_get_product_name, NULL);
188 
189 /**
190  * DOC: product_number
191  *
192  * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards.
197  */
198 
199 static ssize_t amdgpu_device_get_product_number(struct device *dev,
200 		struct device_attribute *attr, char *buf)
201 {
202 	struct drm_device *ddev = dev_get_drvdata(dev);
203 	struct amdgpu_device *adev = drm_to_adev(ddev);
204 
205 	return sysfs_emit(buf, "%s\n", adev->product_number);
206 }
207 
208 static DEVICE_ATTR(product_number, S_IRUGO,
209 		amdgpu_device_get_product_number, NULL);
210 
211 /**
212  * DOC: serial_number
213  *
214  * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards.
219  */
220 
221 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
222 		struct device_attribute *attr, char *buf)
223 {
224 	struct drm_device *ddev = dev_get_drvdata(dev);
225 	struct amdgpu_device *adev = drm_to_adev(ddev);
226 
227 	return sysfs_emit(buf, "%s\n", adev->serial);
228 }
229 
230 static DEVICE_ATTR(serial_number, S_IRUGO,
231 		amdgpu_device_get_serial_number, NULL);
232 
233 /**
234  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
235  *
236  * @dev: drm_device pointer
237  *
238  * Returns true if the device is a dGPU with ATPX power control,
 * otherwise returns false.
240  */
241 bool amdgpu_device_supports_px(struct drm_device *dev)
242 {
243 	struct amdgpu_device *adev = drm_to_adev(dev);
244 
245 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
246 		return true;
247 	return false;
248 }
249 
250 /**
251  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
252  *
253  * @dev: drm_device pointer
254  *
255  * Returns true if the device is a dGPU with ACPI power control,
 * otherwise returns false.
257  */
258 bool amdgpu_device_supports_boco(struct drm_device *dev)
259 {
260 	struct amdgpu_device *adev = drm_to_adev(dev);
261 
262 	if (adev->has_pr3 ||
263 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
264 		return true;
265 	return false;
266 }
267 
268 /**
269  * amdgpu_device_supports_baco - Does the device support BACO
270  *
271  * @dev: drm_device pointer
272  *
 * Returns true if the device supports BACO,
 * otherwise returns false.
275  */
276 bool amdgpu_device_supports_baco(struct drm_device *dev)
277 {
278 	struct amdgpu_device *adev = drm_to_adev(dev);
279 
280 	return amdgpu_asic_supports_baco(adev);
281 }
282 
283 /**
284  * amdgpu_device_supports_smart_shift - Is the device dGPU with
285  * smart shift support
286  *
287  * @dev: drm_device pointer
288  *
289  * Returns true if the device is a dGPU with Smart Shift support,
290  * otherwise returns false.
291  */
292 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
293 {
294 	return (amdgpu_device_supports_boco(dev) &&
295 		amdgpu_acpi_is_power_shift_control_supported());
296 }
297 
298 /*
299  * VRAM access helper functions
300  */
301 
302 /**
 * amdgpu_device_mm_access - access VRAM through MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
309  * @write: true - write to vram, otherwise - read from vram
310  */
311 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
312 			     void *buf, size_t size, bool write)
313 {
314 	unsigned long flags;
315 	uint32_t hi = ~0, tmp = 0;
316 	uint32_t *data = buf;
317 	uint64_t last;
318 	int idx;
319 
320 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
321 		return;
322 
323 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
324 
325 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
326 	for (last = pos + size; pos < last; pos += 4) {
327 		tmp = pos >> 31;
328 
329 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
330 		if (tmp != hi) {
331 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
332 			hi = tmp;
333 		}
334 		if (write)
335 			WREG32_NO_KIQ(mmMM_DATA, *data++);
336 		else
337 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
338 	}
339 
340 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
341 	drm_dev_exit(idx);
342 }
343 
344 /**
 * amdgpu_device_aper_access - access VRAM via the VRAM aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * Returns the number of bytes that have been transferred.
354  */
355 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
356 				 void *buf, size_t size, bool write)
357 {
358 #ifdef CONFIG_64BIT
359 	void __iomem *addr;
360 	size_t count = 0;
361 	uint64_t last;
362 
363 	if (!adev->mman.aper_base_kaddr)
364 		return 0;
365 
366 	last = min(pos + size, adev->gmc.visible_vram_size);
367 	if (last > pos) {
368 		addr = adev->mman.aper_base_kaddr + pos;
369 		count = last - pos;
370 
371 		if (write) {
372 			memcpy_toio(addr, buf, count);
373 			mb();
374 			amdgpu_device_flush_hdp(adev, NULL);
375 		} else {
376 			amdgpu_device_invalidate_hdp(adev, NULL);
377 			mb();
378 			memcpy_fromio(buf, addr, count);
379 		}
380 
381 	}
382 
383 	return count;
384 #else
385 	return 0;
386 #endif
387 }
388 
389 /**
390  * amdgpu_device_vram_access - read/write a buffer in vram
391  *
392  * @adev: amdgpu_device pointer
393  * @pos: offset of the buffer in vram
394  * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
396  * @write: true - write to vram, otherwise - read from vram
397  */
398 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
399 			       void *buf, size_t size, bool write)
400 {
401 	size_t count;
402 
	/* try using the VRAM aperture to access VRAM first */
404 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
405 	size -= count;
406 	if (size) {
		/* use MM_INDEX/MM_DATA to access the rest of VRAM */
408 		pos += count;
409 		buf += count;
410 		amdgpu_device_mm_access(adev, pos, buf, size, write);
411 	}
412 }
413 
414 /*
415  * register access helper functions.
416  */
417 
418 /* Check if hw access should be skipped because of hotplug or device error */
419 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
420 {
421 	if (adev->no_hw_access)
422 		return true;
423 
424 #ifdef CONFIG_LOCKDEP
425 	/*
426 	 * This is a bit complicated to understand, so worth a comment. What we assert
427 	 * here is that the GPU reset is not running on another thread in parallel.
428 	 *
429 	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
431 	 *
432 	 * If the trylock fails we assert that we are either already holding the read
433 	 * side of the lock or are the reset thread itself and hold the write side of
434 	 * the lock.
435 	 */
436 	if (in_task()) {
437 		if (down_read_trylock(&adev->reset_domain->sem))
438 			up_read(&adev->reset_domain->sem);
439 		else
440 			lockdep_assert_held(&adev->reset_domain->sem);
441 	}
442 #endif
443 	return false;
444 }
445 
446 /**
447  * amdgpu_device_rreg - read a memory mapped IO or indirect register
448  *
449  * @adev: amdgpu_device pointer
450  * @reg: dword aligned register offset
451  * @acc_flags: access flags which require special behavior
452  *
453  * Returns the 32 bit value from the offset specified.
454  */
455 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
456 			    uint32_t reg, uint32_t acc_flags)
457 {
458 	uint32_t ret;
459 
460 	if (amdgpu_device_skip_hw_access(adev))
461 		return 0;
462 
463 	if ((reg * 4) < adev->rmmio_size) {
464 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
465 		    amdgpu_sriov_runtime(adev) &&
466 		    down_read_trylock(&adev->reset_domain->sem)) {
467 			ret = amdgpu_kiq_rreg(adev, reg);
468 			up_read(&adev->reset_domain->sem);
469 		} else {
470 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
471 		}
472 	} else {
473 		ret = adev->pcie_rreg(adev, reg * 4);
474 	}
475 
476 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
477 
478 	return ret;
479 }
480 
481 /*
 * MMIO register read with byte offset helper function
 * @offset: byte offset from MMIO start
 */
486 
487 /**
488  * amdgpu_mm_rreg8 - read a memory mapped IO register
489  *
490  * @adev: amdgpu_device pointer
491  * @offset: byte aligned register offset
492  *
493  * Returns the 8 bit value from the offset specified.
494  */
495 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
496 {
497 	if (amdgpu_device_skip_hw_access(adev))
498 		return 0;
499 
500 	if (offset < adev->rmmio_size)
501 		return (readb(adev->rmmio + offset));
502 	BUG();
503 }
504 
505 /*
 * MMIO register write with byte offset helper function
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
511 /**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
513  *
514  * @adev: amdgpu_device pointer
515  * @offset: byte aligned register offset
516  * @value: 8 bit value to write
517  *
518  * Writes the value specified to the offset specified.
519  */
520 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
521 {
522 	if (amdgpu_device_skip_hw_access(adev))
523 		return;
524 
525 	if (offset < adev->rmmio_size)
526 		writeb(value, adev->rmmio + offset);
527 	else
528 		BUG();
529 }
530 
531 /**
532  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
533  *
534  * @adev: amdgpu_device pointer
535  * @reg: dword aligned register offset
536  * @v: 32 bit value to write to the register
537  * @acc_flags: access flags which require special behavior
538  *
539  * Writes the value specified to the offset specified.
540  */
541 void amdgpu_device_wreg(struct amdgpu_device *adev,
542 			uint32_t reg, uint32_t v,
543 			uint32_t acc_flags)
544 {
545 	if (amdgpu_device_skip_hw_access(adev))
546 		return;
547 
548 	if ((reg * 4) < adev->rmmio_size) {
549 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
550 		    amdgpu_sriov_runtime(adev) &&
551 		    down_read_trylock(&adev->reset_domain->sem)) {
552 			amdgpu_kiq_wreg(adev, reg, v);
553 			up_read(&adev->reset_domain->sem);
554 		} else {
555 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
556 		}
557 	} else {
558 		adev->pcie_wreg(adev, reg * 4, v);
559 	}
560 
561 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
562 }
563 
564 /**
565  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
566  *
567  * @adev: amdgpu_device pointer
568  * @reg: mmio/rlc register
569  * @v: value to write
570  *
 * This function is invoked only for debugfs register access.
572  */
573 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
574 			     uint32_t reg, uint32_t v)
575 {
576 	if (amdgpu_device_skip_hw_access(adev))
577 		return;
578 
579 	if (amdgpu_sriov_fullaccess(adev) &&
580 	    adev->gfx.rlc.funcs &&
581 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
582 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
583 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
584 	} else if ((reg * 4) >= adev->rmmio_size) {
585 		adev->pcie_wreg(adev, reg * 4, v);
586 	} else {
587 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
588 	}
589 }
590 
591 /**
592  * amdgpu_mm_rdoorbell - read a doorbell dword
593  *
594  * @adev: amdgpu_device pointer
595  * @index: doorbell index
596  *
597  * Returns the value in the doorbell aperture at the
598  * requested doorbell index (CIK).
599  */
600 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
601 {
602 	if (amdgpu_device_skip_hw_access(adev))
603 		return 0;
604 
605 	if (index < adev->doorbell.num_kernel_doorbells) {
606 		return readl(adev->doorbell.ptr + index);
607 	} else {
608 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
609 		return 0;
610 	}
611 }
612 
613 /**
614  * amdgpu_mm_wdoorbell - write a doorbell dword
615  *
616  * @adev: amdgpu_device pointer
617  * @index: doorbell index
618  * @v: value to write
619  *
620  * Writes @v to the doorbell aperture at the
621  * requested doorbell index (CIK).
622  */
623 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
624 {
625 	if (amdgpu_device_skip_hw_access(adev))
626 		return;
627 
628 	if (index < adev->doorbell.num_kernel_doorbells) {
629 		writel(v, adev->doorbell.ptr + index);
630 	} else {
631 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
632 	}
633 }
634 
635 /**
636  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
637  *
638  * @adev: amdgpu_device pointer
639  * @index: doorbell index
640  *
641  * Returns the value in the doorbell aperture at the
642  * requested doorbell index (VEGA10+).
643  */
644 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
645 {
646 	if (amdgpu_device_skip_hw_access(adev))
647 		return 0;
648 
649 	if (index < adev->doorbell.num_kernel_doorbells) {
650 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
651 	} else {
652 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
653 		return 0;
654 	}
655 }
656 
657 /**
658  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
659  *
660  * @adev: amdgpu_device pointer
661  * @index: doorbell index
662  * @v: value to write
663  *
664  * Writes @v to the doorbell aperture at the
665  * requested doorbell index (VEGA10+).
666  */
667 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
668 {
669 	if (amdgpu_device_skip_hw_access(adev))
670 		return;
671 
672 	if (index < adev->doorbell.num_kernel_doorbells) {
673 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
674 	} else {
675 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
676 	}
677 }
678 
679 /**
680  * amdgpu_device_indirect_rreg - read an indirect register
681  *
682  * @adev: amdgpu_device pointer
683  * @reg_addr: indirect register address to read from
684  *
685  * Returns the value of indirect register @reg_addr
686  */
687 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
688 				u32 reg_addr)
689 {
690 	unsigned long flags, pcie_index, pcie_data;
691 	void __iomem *pcie_index_offset;
692 	void __iomem *pcie_data_offset;
693 	u32 r;
694 
695 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
696 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
697 
698 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
699 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
700 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
701 
702 	writel(reg_addr, pcie_index_offset);
703 	readl(pcie_index_offset);
704 	r = readl(pcie_data_offset);
705 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
706 
707 	return r;
708 }
709 
710 /**
711  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
712  *
713  * @adev: amdgpu_device pointer
714  * @reg_addr: indirect register address to read from
715  *
716  * Returns the value of indirect register @reg_addr
717  */
718 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
719 				  u32 reg_addr)
720 {
721 	unsigned long flags, pcie_index, pcie_data;
722 	void __iomem *pcie_index_offset;
723 	void __iomem *pcie_data_offset;
724 	u64 r;
725 
726 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
727 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
728 
729 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
730 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
731 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
732 
733 	/* read low 32 bits */
734 	writel(reg_addr, pcie_index_offset);
735 	readl(pcie_index_offset);
736 	r = readl(pcie_data_offset);
737 	/* read high 32 bits */
738 	writel(reg_addr + 4, pcie_index_offset);
739 	readl(pcie_index_offset);
740 	r |= ((u64)readl(pcie_data_offset) << 32);
741 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
742 
743 	return r;
744 }
745 
746 /**
 * amdgpu_device_indirect_wreg - write an indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 * Writes @reg_data to indirect register @reg_addr.
755  */
756 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
757 				 u32 reg_addr, u32 reg_data)
758 {
759 	unsigned long flags, pcie_index, pcie_data;
760 	void __iomem *pcie_index_offset;
761 	void __iomem *pcie_data_offset;
762 
763 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
764 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
765 
766 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
767 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
768 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
769 
770 	writel(reg_addr, pcie_index_offset);
771 	readl(pcie_index_offset);
772 	writel(reg_data, pcie_data_offset);
773 	readl(pcie_data_offset);
774 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
775 }
776 
777 /**
 * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 * Writes the 64 bit value @reg_data to indirect register @reg_addr.
786  */
787 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
788 				   u32 reg_addr, u64 reg_data)
789 {
790 	unsigned long flags, pcie_index, pcie_data;
791 	void __iomem *pcie_index_offset;
792 	void __iomem *pcie_data_offset;
793 
794 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
795 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
796 
797 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
798 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
799 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
800 
801 	/* write low 32 bits */
802 	writel(reg_addr, pcie_index_offset);
803 	readl(pcie_index_offset);
804 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
805 	readl(pcie_data_offset);
806 	/* write high 32 bits */
807 	writel(reg_addr + 4, pcie_index_offset);
808 	readl(pcie_index_offset);
809 	writel((u32)(reg_data >> 32), pcie_data_offset);
810 	readl(pcie_data_offset);
811 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
812 }
813 
814 /**
815  * amdgpu_device_get_rev_id - query device rev_id
816  *
817  * @adev: amdgpu_device pointer
818  *
 * Returns the device rev_id.
820  */
821 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
822 {
823 	return adev->nbio.funcs->get_rev_id(adev);
824 }
825 
826 /**
827  * amdgpu_invalid_rreg - dummy reg read function
828  *
829  * @adev: amdgpu_device pointer
830  * @reg: offset of register
831  *
832  * Dummy register read function.  Used for register blocks
833  * that certain asics don't have (all asics).
834  * Returns the value in the register.
835  */
836 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
837 {
838 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
839 	BUG();
840 	return 0;
841 }
842 
843 /**
844  * amdgpu_invalid_wreg - dummy reg write function
845  *
846  * @adev: amdgpu_device pointer
847  * @reg: offset of register
848  * @v: value to write to the register
849  *
 * Dummy register write function.  Used for register blocks
851  * that certain asics don't have (all asics).
852  */
853 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
854 {
855 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
856 		  reg, v);
857 	BUG();
858 }
859 
860 /**
861  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
862  *
863  * @adev: amdgpu_device pointer
864  * @reg: offset of register
865  *
866  * Dummy register read function.  Used for register blocks
867  * that certain asics don't have (all asics).
868  * Returns the value in the register.
869  */
870 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
871 {
872 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
873 	BUG();
874 	return 0;
875 }
876 
877 /**
878  * amdgpu_invalid_wreg64 - dummy reg write function
879  *
880  * @adev: amdgpu_device pointer
881  * @reg: offset of register
882  * @v: value to write to the register
883  *
 * Dummy register write function.  Used for register blocks
885  * that certain asics don't have (all asics).
886  */
887 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
888 {
889 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
890 		  reg, v);
891 	BUG();
892 }
893 
894 /**
895  * amdgpu_block_invalid_rreg - dummy reg read function
896  *
897  * @adev: amdgpu_device pointer
898  * @block: offset of instance
899  * @reg: offset of register
900  *
901  * Dummy register read function.  Used for register blocks
902  * that certain asics don't have (all asics).
903  * Returns the value in the register.
904  */
905 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
906 					  uint32_t block, uint32_t reg)
907 {
908 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
909 		  reg, block);
910 	BUG();
911 	return 0;
912 }
913 
914 /**
915  * amdgpu_block_invalid_wreg - dummy reg write function
916  *
917  * @adev: amdgpu_device pointer
918  * @block: offset of instance
919  * @reg: offset of register
920  * @v: value to write to the register
921  *
 * Dummy register write function.  Used for register blocks
923  * that certain asics don't have (all asics).
924  */
925 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
926 				      uint32_t block,
927 				      uint32_t reg, uint32_t v)
928 {
929 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
930 		  reg, block, v);
931 	BUG();
932 }
933 
934 /**
935  * amdgpu_device_asic_init - Wrapper for atom asic_init
936  *
937  * @adev: amdgpu_device pointer
938  *
939  * Does any asic specific work and then calls atom asic init.
940  */
941 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
942 {
943 	amdgpu_asic_pre_asic_init(adev);
944 
945 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
946 		return amdgpu_atomfirmware_asic_init(adev, true);
947 	else
948 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
949 }
950 
951 /**
952  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
953  *
954  * @adev: amdgpu_device pointer
955  *
956  * Allocates a scratch page of VRAM for use by various things in the
957  * driver.
958  */
959 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
960 {
961 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
962 				       AMDGPU_GEM_DOMAIN_VRAM |
963 				       AMDGPU_GEM_DOMAIN_GTT,
964 				       &adev->mem_scratch.robj,
965 				       &adev->mem_scratch.gpu_addr,
966 				       (void **)&adev->mem_scratch.ptr);
967 }
968 
969 /**
970  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
971  *
972  * @adev: amdgpu_device pointer
973  *
974  * Frees the VRAM scratch page.
975  */
976 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
977 {
978 	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
979 }
980 
981 /**
982  * amdgpu_device_program_register_sequence - program an array of registers.
983  *
984  * @adev: amdgpu_device pointer
985  * @registers: pointer to the register array
986  * @array_size: size of the register array
987  *
 * Programs an array of registers with AND and OR masks.
989  * This is a helper for setting golden registers.
990  */
991 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
992 					     const u32 *registers,
993 					     const u32 array_size)
994 {
995 	u32 tmp, reg, and_mask, or_mask;
996 	int i;
997 
998 	if (array_size % 3)
999 		return;
1000 
	for (i = 0; i < array_size; i += 3) {
1002 		reg = registers[i + 0];
1003 		and_mask = registers[i + 1];
1004 		or_mask = registers[i + 2];
1005 
1006 		if (and_mask == 0xffffffff) {
1007 			tmp = or_mask;
1008 		} else {
1009 			tmp = RREG32(reg);
1010 			tmp &= ~and_mask;
1011 			if (adev->family >= AMDGPU_FAMILY_AI)
1012 				tmp |= (or_mask & and_mask);
1013 			else
1014 				tmp |= or_mask;
1015 		}
1016 		WREG32(reg, tmp);
1017 	}
1018 }
1019 
1020 /**
1021  * amdgpu_device_pci_config_reset - reset the GPU
1022  *
1023  * @adev: amdgpu_device pointer
1024  *
1025  * Resets the GPU using the pci config reset sequence.
1026  * Only applicable to asics prior to vega10.
1027  */
1028 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1029 {
1030 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1031 }
1032 
1033 /**
1034  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1035  *
1036  * @adev: amdgpu_device pointer
1037  *
1038  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1039  */
1040 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1041 {
1042 	return pci_reset_function(adev->pdev);
1043 }
1044 
1045 /*
 * GPU doorbell aperture helper functions.
1047  */
1048 /**
1049  * amdgpu_device_doorbell_init - Init doorbell driver information.
1050  *
1051  * @adev: amdgpu_device pointer
1052  *
1053  * Init doorbell driver information (CIK)
1054  * Returns 0 on success, error on failure.
1055  */
1056 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1057 {
1058 
1059 	/* No doorbell on SI hardware generation */
1060 	if (adev->asic_type < CHIP_BONAIRE) {
1061 		adev->doorbell.base = 0;
1062 		adev->doorbell.size = 0;
1063 		adev->doorbell.num_kernel_doorbells = 0;
1064 		adev->doorbell.ptr = NULL;
1065 		return 0;
1066 	}
1067 
1068 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1069 		return -EINVAL;
1070 
1071 	amdgpu_asic_init_doorbell_index(adev);
1072 
1073 	/* doorbell bar mapping */
1074 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1075 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1076 
1077 	if (adev->enable_mes) {
1078 		adev->doorbell.num_kernel_doorbells =
1079 			adev->doorbell.size / sizeof(u32);
1080 	} else {
1081 		adev->doorbell.num_kernel_doorbells =
1082 			min_t(u32, adev->doorbell.size / sizeof(u32),
1083 			      adev->doorbell_index.max_assignment+1);
1084 		if (adev->doorbell.num_kernel_doorbells == 0)
1085 			return -EINVAL;
1086 
		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
		 * paging queue doorbells use the second page. The
		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
		 * doorbells are in the first page. So with paging queue enabled,
		 * the max num_kernel_doorbells should be increased by one page
		 * (0x400 dwords).
		 */
1093 		if (adev->asic_type >= CHIP_VEGA10)
1094 			adev->doorbell.num_kernel_doorbells += 0x400;
1095 	}
1096 
1097 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1098 				     adev->doorbell.num_kernel_doorbells *
1099 				     sizeof(u32));
1100 	if (adev->doorbell.ptr == NULL)
1101 		return -ENOMEM;
1102 
1103 	return 0;
1104 }
1105 
1106 /**
1107  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1108  *
1109  * @adev: amdgpu_device pointer
1110  *
1111  * Tear down doorbell driver information (CIK)
1112  */
1113 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1114 {
1115 	iounmap(adev->doorbell.ptr);
1116 	adev->doorbell.ptr = NULL;
1117 }
1118 
1119 
1120 
1121 /*
1122  * amdgpu_device_wb_*()
1123  * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
1125  */
1126 
1127 /**
1128  * amdgpu_device_wb_fini - Disable Writeback and free memory
1129  *
1130  * @adev: amdgpu_device pointer
1131  *
1132  * Disables Writeback and frees the Writeback memory (all asics).
1133  * Used at driver shutdown.
1134  */
1135 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1136 {
1137 	if (adev->wb.wb_obj) {
1138 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1139 				      &adev->wb.gpu_addr,
1140 				      (void **)&adev->wb.wb);
1141 		adev->wb.wb_obj = NULL;
1142 	}
1143 }
1144 
1145 /**
1146  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1147  *
1148  * @adev: amdgpu_device pointer
1149  *
1150  * Initializes writeback and allocates writeback memory (all asics).
1151  * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
1153  */
1154 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1155 {
1156 	int r;
1157 
1158 	if (adev->wb.wb_obj == NULL) {
1159 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1160 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1161 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1162 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1163 					    (void **)&adev->wb.wb);
1164 		if (r) {
1165 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1166 			return r;
1167 		}
1168 
1169 		adev->wb.num_wb = AMDGPU_MAX_WB;
1170 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1171 
1172 		/* clear wb memory */
1173 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1174 	}
1175 
1176 	return 0;
1177 }
1178 
1179 /**
1180  * amdgpu_device_wb_get - Allocate a wb entry
1181  *
1182  * @adev: amdgpu_device pointer
1183  * @wb: wb index
1184  *
1185  * Allocate a wb slot for use by the driver (all asics).
1186  * Returns 0 on success or -EINVAL on failure.
1187  */
1188 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1189 {
1190 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1191 
1192 	if (offset < adev->wb.num_wb) {
1193 		__set_bit(offset, adev->wb.used);
1194 		*wb = offset << 3; /* convert to dw offset */
1195 		return 0;
1196 	} else {
1197 		return -EINVAL;
1198 	}
1199 }
1200 
1201 /**
1202  * amdgpu_device_wb_free - Free a wb entry
1203  *
1204  * @adev: amdgpu_device pointer
1205  * @wb: wb index
1206  *
1207  * Free a wb slot allocated for use by the driver (all asics)
1208  */
1209 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1210 {
1211 	wb >>= 3;
1212 	if (wb < adev->wb.num_wb)
1213 		__clear_bit(wb, adev->wb.used);
1214 }
1215 
1216 /**
1217  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1218  *
1219  * @adev: amdgpu_device pointer
1220  *
1221  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
1223  * driver loading by returning -ENODEV.
1224  */
1225 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1226 {
1227 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1228 	struct pci_bus *root;
1229 	struct resource *res;
	unsigned int i;
1231 	u16 cmd;
1232 	int r;
1233 
1234 	/* Bypass for VF */
1235 	if (amdgpu_sriov_vf(adev))
1236 		return 0;
1237 
1238 	/* skip if the bios has already enabled large BAR */
1239 	if (adev->gmc.real_vram_size &&
1240 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1241 		return 0;
1242 
1243 	/* Check if the root BUS has 64bit memory resources */
1244 	root = adev->pdev->bus;
1245 	while (root->parent)
1246 		root = root->parent;
1247 
1248 	pci_bus_for_each_resource(root, res, i) {
1249 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1250 		    res->start > 0x100000000ull)
1251 			break;
1252 	}
1253 
1254 	/* Trying to resize is pointless without a root hub window above 4GB */
1255 	if (!res)
1256 		return 0;
1257 
1258 	/* Limit the BAR size to what is available */
1259 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1260 			rbar_size);
1261 
1262 	/* Disable memory decoding while we change the BAR addresses and size */
1263 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1264 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1265 			      cmd & ~PCI_COMMAND_MEMORY);
1266 
1267 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1268 	amdgpu_device_doorbell_fini(adev);
1269 	if (adev->asic_type >= CHIP_BONAIRE)
1270 		pci_release_resource(adev->pdev, 2);
1271 
1272 	pci_release_resource(adev->pdev, 0);
1273 
1274 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1275 	if (r == -ENOSPC)
1276 		DRM_INFO("Not enough PCI address space for a large BAR.");
1277 	else if (r && r != -ENOTSUPP)
1278 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1279 
1280 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1281 
1282 	/* When the doorbell or fb BAR isn't available we have no chance of
1283 	 * using the device.
1284 	 */
1285 	r = amdgpu_device_doorbell_init(adev);
1286 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1287 		return -ENODEV;
1288 
1289 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1290 
1291 	return 0;
1292 }
1293 
1294 /*
1295  * GPU helpers function.
1296  */
1297 /**
1298  * amdgpu_device_need_post - check if the hw need post or not
1299  *
1300  * @adev: amdgpu_device pointer
1301  *
1302  * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed after a hw reset is performed.
 * Returns true if a post is needed, false if not.
1305  */
1306 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1307 {
1308 	uint32_t reg;
1309 
1310 	if (amdgpu_sriov_vf(adev))
1311 		return false;
1312 
1313 	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
		 * reboot some old SMC firmware still needs the driver to do a vPost,
		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
		 * this flaw, so force vPost for SMC versions below 22.15.
		 */
1319 		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
1324 			if (err)
1325 				return true;
1326 
1327 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1328 			if (fw_ver < 0x00160e00)
1329 				return true;
1330 		}
1331 	}
1332 
1333 	/* Don't post if we need to reset whole hive on init */
1334 	if (adev->gmc.xgmi.pending_reset)
1335 		return false;
1336 
1337 	if (adev->has_hw_reset) {
1338 		adev->has_hw_reset = false;
1339 		return true;
1340 	}
1341 
1342 	/* bios scratch used on CIK+ */
1343 	if (adev->asic_type >= CHIP_BONAIRE)
1344 		return amdgpu_atombios_scratch_need_asic_init(adev);
1345 
1346 	/* check MEM_SIZE for older asics */
1347 	reg = amdgpu_asic_get_config_memsize(adev);
1348 
1349 	if ((reg != 0) && (reg != 0xffffffff))
1350 		return false;
1351 
1352 	return true;
1353 }
1354 
1355 /**
1356  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1357  *
1358  * @adev: amdgpu_device pointer
1359  *
1360  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1361  * be set for this device.
1362  *
1363  * Returns true if it should be used or false if not.
1364  */
1365 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1366 {
1367 	switch (amdgpu_aspm) {
1368 	case -1:
1369 		break;
1370 	case 0:
1371 		return false;
1372 	case 1:
1373 		return true;
1374 	default:
1375 		return false;
1376 	}
1377 	return pcie_aspm_enabled(adev->pdev);
1378 }
1379 
1380 bool amdgpu_device_aspm_support_quirk(void)
1381 {
1382 #if IS_ENABLED(CONFIG_X86)
1383 	struct cpuinfo_x86 *c = &cpu_data(0);
1384 
1385 	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
1386 #else
1387 	return true;
1388 #endif
1389 }
1390 
1391 /* if we get transitioned to only one device, take VGA back */
1392 /**
1393  * amdgpu_device_vga_set_decode - enable/disable vga decode
1394  *
1395  * @pdev: PCI device pointer
1396  * @state: enable/disable vga decode
1397  *
1398  * Enable/disable vga decode (all asics).
1399  * Returns VGA resource flags.
1400  */
1401 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1402 		bool state)
1403 {
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
1406 	if (state)
1407 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1408 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1409 	else
1410 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1411 }
1412 
1413 /**
1414  * amdgpu_device_check_block_size - validate the vm block size
1415  *
1416  * @adev: amdgpu_device pointer
1417  *
1418  * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory. A page is 4KB, so we have 12 bits of offset, a minimum of 9
 * bits in the page table, and the remaining bits are in the page directory.
1422  */
1423 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1424 {
	/* defines the number of bits in the page table versus the page directory;
	 * a page is 4KB so we have 12 bits of offset, a minimum of 9 bits in the
	 * page table and the remaining bits are in the page directory */
1428 	if (amdgpu_vm_block_size == -1)
1429 		return;
1430 
1431 	if (amdgpu_vm_block_size < 9) {
1432 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1433 			 amdgpu_vm_block_size);
1434 		amdgpu_vm_block_size = -1;
1435 	}
1436 }
1437 
1438 /**
1439  * amdgpu_device_check_vm_size - validate the vm size
1440  *
1441  * @adev: amdgpu_device pointer
1442  *
1443  * Validates the vm size in GB specified via module parameter.
1444  * The VM size is the size of the GPU virtual memory space in GB.
1445  */
1446 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1447 {
1448 	/* no need to check the default value */
1449 	if (amdgpu_vm_size == -1)
1450 		return;
1451 
1452 	if (amdgpu_vm_size < 1) {
1453 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1454 			 amdgpu_vm_size);
1455 		amdgpu_vm_size = -1;
1456 	}
1457 }
1458 
1459 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1460 {
1461 	struct sysinfo si;
1462 	bool is_os_64 = (sizeof(void *) == 8);
1463 	uint64_t total_memory;
1464 	uint64_t dram_size_seven_GB = 0x1B8000000;
1465 	uint64_t dram_size_three_GB = 0xB8000000;
1466 
1467 	if (amdgpu_smu_memory_pool_size == 0)
1468 		return;
1469 
1470 	if (!is_os_64) {
1471 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1472 		goto def_value;
1473 	}
1474 	si_meminfo(&si);
1475 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1476 
1477 	if ((amdgpu_smu_memory_pool_size == 1) ||
1478 		(amdgpu_smu_memory_pool_size == 2)) {
1479 		if (total_memory < dram_size_three_GB)
1480 			goto def_value1;
1481 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1482 		(amdgpu_smu_memory_pool_size == 8)) {
1483 		if (total_memory < dram_size_seven_GB)
1484 			goto def_value1;
1485 	} else {
1486 		DRM_WARN("Smu memory pool size not supported\n");
1487 		goto def_value;
1488 	}
1489 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1490 
1491 	return;
1492 
1493 def_value1:
1494 	DRM_WARN("No enough system memory\n");
1495 def_value:
1496 	adev->pm.smu_prv_buffer_size = 0;
1497 }
1498 
1499 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1500 {
1501 	if (!(adev->flags & AMD_IS_APU) ||
1502 	    adev->asic_type < CHIP_RAVEN)
1503 		return 0;
1504 
1505 	switch (adev->asic_type) {
1506 	case CHIP_RAVEN:
1507 		if (adev->pdev->device == 0x15dd)
1508 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1509 		if (adev->pdev->device == 0x15d8)
1510 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1511 		break;
1512 	case CHIP_RENOIR:
1513 		if ((adev->pdev->device == 0x1636) ||
1514 		    (adev->pdev->device == 0x164c))
1515 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1516 		else
1517 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1518 		break;
1519 	case CHIP_VANGOGH:
1520 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1521 		break;
1522 	case CHIP_YELLOW_CARP:
1523 		break;
1524 	case CHIP_CYAN_SKILLFISH:
1525 		if ((adev->pdev->device == 0x13FE) ||
1526 		    (adev->pdev->device == 0x143F))
1527 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1528 		break;
1529 	default:
1530 		break;
1531 	}
1532 
1533 	return 0;
1534 }
1535 
1536 /**
1537  * amdgpu_device_check_arguments - validate module params
1538  *
1539  * @adev: amdgpu_device pointer
1540  *
1541  * Validates certain module parameters and updates
1542  * the associated values used by the driver (all asics).
1543  */
1544 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1545 {
1546 	if (amdgpu_sched_jobs < 4) {
1547 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1548 			 amdgpu_sched_jobs);
1549 		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1551 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1552 			 amdgpu_sched_jobs);
1553 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1554 	}
1555 
1556 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1557 		/* gart size must be greater or equal to 32M */
1558 		dev_warn(adev->dev, "gart size (%d) too small\n",
1559 			 amdgpu_gart_size);
1560 		amdgpu_gart_size = -1;
1561 	}
1562 
1563 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1564 		/* gtt size must be greater or equal to 32M */
1565 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1566 				 amdgpu_gtt_size);
1567 		amdgpu_gtt_size = -1;
1568 	}
1569 
1570 	/* valid range is between 4 and 9 inclusive */
1571 	if (amdgpu_vm_fragment_size != -1 &&
1572 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1573 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1574 		amdgpu_vm_fragment_size = -1;
1575 	}
1576 
1577 	if (amdgpu_sched_hw_submission < 2) {
1578 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1579 			 amdgpu_sched_hw_submission);
1580 		amdgpu_sched_hw_submission = 2;
1581 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1582 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1583 			 amdgpu_sched_hw_submission);
1584 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1585 	}
1586 
1587 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1588 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1589 		amdgpu_reset_method = -1;
1590 	}
1591 
1592 	amdgpu_device_check_smu_prv_buffer_size(adev);
1593 
1594 	amdgpu_device_check_vm_size(adev);
1595 
1596 	amdgpu_device_check_block_size(adev);
1597 
1598 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1599 
1600 	return 0;
1601 }
1602 
1603 /**
1604  * amdgpu_switcheroo_set_state - set switcheroo state
1605  *
1606  * @pdev: pci dev pointer
1607  * @state: vga_switcheroo state
1608  *
1609  * Callback for the switcheroo driver.  Suspends or resumes
1610  * the asics before or after it is powered up using ACPI methods.
1611  */
1612 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1613 					enum vga_switcheroo_state state)
1614 {
1615 	struct drm_device *dev = pci_get_drvdata(pdev);
1616 	int r;
1617 
1618 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1619 		return;
1620 
1621 	if (state == VGA_SWITCHEROO_ON) {
1622 		pr_info("switched on\n");
1623 		/* don't suspend or resume card normally */
1624 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1625 
1626 		pci_set_power_state(pdev, PCI_D0);
1627 		amdgpu_device_load_pci_state(pdev);
1628 		r = pci_enable_device(pdev);
1629 		if (r)
1630 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1631 		amdgpu_device_resume(dev, true);
1632 
1633 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1634 	} else {
1635 		pr_info("switched off\n");
1636 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1637 		amdgpu_device_suspend(dev, true);
1638 		amdgpu_device_cache_pci_state(pdev);
1639 		/* Shut down the device */
1640 		pci_disable_device(pdev);
1641 		pci_set_power_state(pdev, PCI_D3cold);
1642 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1643 	}
1644 }
1645 
1646 /**
1647  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1648  *
1649  * @pdev: pci dev pointer
1650  *
 * Callback for the switcheroo driver.  Check if the switcheroo
1652  * state can be changed.
1653  * Returns true if the state can be changed, false if not.
1654  */
1655 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1656 {
1657 	struct drm_device *dev = pci_get_drvdata(pdev);
1658 
1659 	/*
1660 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1661 	* locking inversion with the driver load path. And the access here is
1662 	* completely racy anyway. So don't bother with locking for now.
1663 	*/
1664 	return atomic_read(&dev->open_count) == 0;
1665 }
1666 
1667 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1668 	.set_gpu_state = amdgpu_switcheroo_set_state,
1669 	.reprobe = NULL,
1670 	.can_switch = amdgpu_switcheroo_can_switch,
1671 };
1672 
1673 /**
1674  * amdgpu_device_ip_set_clockgating_state - set the CG state
1675  *
1676  * @dev: amdgpu_device pointer
1677  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1678  * @state: clockgating state (gate or ungate)
1679  *
1680  * Sets the requested clockgating state for all instances of
1681  * the hardware IP specified.
1682  * Returns the error code from the last instance.
1683  */
1684 int amdgpu_device_ip_set_clockgating_state(void *dev,
1685 					   enum amd_ip_block_type block_type,
1686 					   enum amd_clockgating_state state)
1687 {
1688 	struct amdgpu_device *adev = dev;
1689 	int i, r = 0;
1690 
1691 	for (i = 0; i < adev->num_ip_blocks; i++) {
1692 		if (!adev->ip_blocks[i].status.valid)
1693 			continue;
1694 		if (adev->ip_blocks[i].version->type != block_type)
1695 			continue;
1696 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1697 			continue;
1698 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1699 			(void *)adev, state);
1700 		if (r)
1701 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1702 				  adev->ip_blocks[i].version->funcs->name, r);
1703 	}
1704 	return r;
1705 }
1706 
1707 /**
1708  * amdgpu_device_ip_set_powergating_state - set the PG state
1709  *
1710  * @dev: amdgpu_device pointer
1711  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1712  * @state: powergating state (gate or ungate)
1713  *
1714  * Sets the requested powergating state for all instances of
1715  * the hardware IP specified.
1716  * Returns the error code from the last instance.
1717  */
1718 int amdgpu_device_ip_set_powergating_state(void *dev,
1719 					   enum amd_ip_block_type block_type,
1720 					   enum amd_powergating_state state)
1721 {
1722 	struct amdgpu_device *adev = dev;
1723 	int i, r = 0;
1724 
1725 	for (i = 0; i < adev->num_ip_blocks; i++) {
1726 		if (!adev->ip_blocks[i].status.valid)
1727 			continue;
1728 		if (adev->ip_blocks[i].version->type != block_type)
1729 			continue;
1730 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1731 			continue;
1732 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1733 			(void *)adev, state);
1734 		if (r)
1735 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1736 				  adev->ip_blocks[i].version->funcs->name, r);
1737 	}
1738 	return r;
1739 }
1740 
1741 /**
1742  * amdgpu_device_ip_get_clockgating_state - get the CG state
1743  *
1744  * @adev: amdgpu_device pointer
1745  * @flags: clockgating feature flags
1746  *
1747  * Walks the list of IPs on the device and updates the clockgating
1748  * flags for each IP.
1749  * Updates @flags with the feature flags for each hardware IP where
1750  * clockgating is enabled.
1751  */
1752 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1753 					    u64 *flags)
1754 {
1755 	int i;
1756 
1757 	for (i = 0; i < adev->num_ip_blocks; i++) {
1758 		if (!adev->ip_blocks[i].status.valid)
1759 			continue;
1760 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1761 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1762 	}
1763 }
1764 
1765 /**
1766  * amdgpu_device_ip_wait_for_idle - wait for idle
1767  *
1768  * @adev: amdgpu_device pointer
1769  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1770  *
 * Waits for the requested hardware IP to be idle.
1772  * Returns 0 for success or a negative error code on failure.
1773  */
1774 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1775 				   enum amd_ip_block_type block_type)
1776 {
1777 	int i, r;
1778 
1779 	for (i = 0; i < adev->num_ip_blocks; i++) {
1780 		if (!adev->ip_blocks[i].status.valid)
1781 			continue;
1782 		if (adev->ip_blocks[i].version->type == block_type) {
1783 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1784 			if (r)
1785 				return r;
1786 			break;
1787 		}
1788 	}
	return 0;
}
1792 
1793 /**
1794  * amdgpu_device_ip_is_idle - is the hardware IP idle
1795  *
1796  * @adev: amdgpu_device pointer
1797  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1798  *
1799  * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
1801  */
1802 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1803 			      enum amd_ip_block_type block_type)
1804 {
1805 	int i;
1806 
1807 	for (i = 0; i < adev->num_ip_blocks; i++) {
1808 		if (!adev->ip_blocks[i].status.valid)
1809 			continue;
1810 		if (adev->ip_blocks[i].version->type == block_type)
1811 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1812 	}
	return true;
}
1816 
1817 /**
1818  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1819  *
1820  * @adev: amdgpu_device pointer
1821  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1822  *
1823  * Returns a pointer to the hardware IP block structure
1824  * if it exists for the asic, otherwise NULL.
1825  */
1826 struct amdgpu_ip_block *
1827 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1828 			      enum amd_ip_block_type type)
1829 {
1830 	int i;
1831 
1832 	for (i = 0; i < adev->num_ip_blocks; i++)
1833 		if (adev->ip_blocks[i].version->type == type)
1834 			return &adev->ip_blocks[i];
1835 
1836 	return NULL;
1837 }
1838 
1839 /**
 * amdgpu_device_ip_block_version_cmp - compare the version of an IP block
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the IP block's version is greater than or equal to the
 * requested @major.@minor version, 1 if it is smaller or the IP block
 * doesn't exist.
1849  */
1850 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1851 				       enum amd_ip_block_type type,
1852 				       u32 major, u32 minor)
1853 {
1854 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1855 
1856 	if (ip_block && ((ip_block->version->major > major) ||
1857 			((ip_block->version->major == major) &&
1858 			(ip_block->version->minor >= minor))))
1859 		return 0;
1860 
1861 	return 1;
1862 }
1863 
1864 /**
 * amdgpu_device_ip_block_add - add an IP block to the device
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.  Returns 0 on success (including when the block is
 * skipped because the IP is harvested) or -EINVAL if
 * @ip_block_version is NULL.
1872  */
1873 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1874 			       const struct amdgpu_ip_block_version *ip_block_version)
1875 {
1876 	if (!ip_block_version)
1877 		return -EINVAL;
1878 
1879 	switch (ip_block_version->type) {
1880 	case AMD_IP_BLOCK_TYPE_VCN:
1881 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1882 			return 0;
1883 		break;
1884 	case AMD_IP_BLOCK_TYPE_JPEG:
1885 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1886 			return 0;
1887 		break;
1888 	default:
1889 		break;
1890 	}
1891 
1892 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1893 		  ip_block_version->funcs->name);
1894 
1895 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1896 
1897 	return 0;
1898 }
1899 
1900 /**
1901  * amdgpu_device_enable_virtual_display - enable virtual display feature
1902  *
1903  * @adev: amdgpu_device pointer
1904  *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display.  This feature provides virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display settings (number of
 * virtual connectors, crtcs, etc.) specified.
1911  */
1912 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1913 {
1914 	adev->enable_virtual_display = false;
1915 
1916 	if (amdgpu_virtual_display) {
1917 		const char *pci_address_name = pci_name(adev->pdev);
1918 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1919 
1920 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1921 		pciaddstr_tmp = pciaddstr;
1922 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1923 			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname) ||
			    !strcmp(pci_address_name, pciaddname)) {
1926 				long num_crtc;
1927 				int res = -1;
1928 
1929 				adev->enable_virtual_display = true;
1930 
1931 				if (pciaddname_tmp)
1932 					res = kstrtol(pciaddname_tmp, 10,
1933 						      &num_crtc);
1934 
1935 				if (!res) {
1936 					if (num_crtc < 1)
1937 						num_crtc = 1;
1938 					if (num_crtc > 6)
1939 						num_crtc = 6;
1940 					adev->mode_info.num_crtc = num_crtc;
1941 				} else {
1942 					adev->mode_info.num_crtc = 1;
1943 				}
1944 				break;
1945 			}
1946 		}
1947 
1948 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1949 			 amdgpu_virtual_display, pci_address_name,
1950 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1951 
1952 		kfree(pciaddstr);
1953 	}
1954 }
1955 
1956 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1957 {
1958 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1959 		adev->mode_info.num_crtc = 1;
1960 		adev->enable_virtual_display = true;
1961 		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1962 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1963 	}
1964 }
1965 
1966 /**
1967  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1968  *
1969  * @adev: amdgpu_device pointer
1970  *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success or a negative error code on failure.
1975  */
1976 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1977 {
1978 	const char *chip_name;
1979 	char fw_name[40];
1980 	int err;
1981 	const struct gpu_info_firmware_header_v1_0 *hdr;
1982 
1983 	adev->firmware.gpu_info_fw = NULL;
1984 
1985 	if (adev->mman.discovery_bin) {
1986 		/*
1987 		 * FIXME: The bounding box is still needed by Navi12, so
1988 		 * temporarily read it from gpu_info firmware. Should be dropped
1989 		 * when DAL no longer needs it.
1990 		 */
1991 		if (adev->asic_type != CHIP_NAVI12)
1992 			return 0;
1993 	}
1994 
1995 	switch (adev->asic_type) {
1996 	default:
1997 		return 0;
1998 	case CHIP_VEGA10:
1999 		chip_name = "vega10";
2000 		break;
2001 	case CHIP_VEGA12:
2002 		chip_name = "vega12";
2003 		break;
2004 	case CHIP_RAVEN:
2005 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2006 			chip_name = "raven2";
2007 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2008 			chip_name = "picasso";
2009 		else
2010 			chip_name = "raven";
2011 		break;
2012 	case CHIP_ARCTURUS:
2013 		chip_name = "arcturus";
2014 		break;
2015 	case CHIP_NAVI12:
2016 		chip_name = "navi12";
2017 		break;
2018 	}
2019 
2020 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2021 	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2022 	if (err) {
2023 		dev_err(adev->dev,
2024 			"Failed to get gpu_info firmware \"%s\"\n",
2025 			fw_name);
2026 		goto out;
2027 	}
2028 
2029 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2030 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2031 
2032 	switch (hdr->version_major) {
2033 	case 1:
2034 	{
2035 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2036 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2037 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2038 
2039 		/*
		 * Should be dropped when DAL no longer needs it.
2041 		 */
2042 		if (adev->asic_type == CHIP_NAVI12)
2043 			goto parse_soc_bounding_box;
2044 
2045 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2046 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2047 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2048 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2049 		adev->gfx.config.max_texture_channel_caches =
2050 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2051 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2052 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2053 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2054 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2055 		adev->gfx.config.double_offchip_lds_buf =
2056 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2057 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2058 		adev->gfx.cu_info.max_waves_per_simd =
2059 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2060 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2061 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2062 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2063 		if (hdr->version_minor >= 1) {
2064 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2065 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2066 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2067 			adev->gfx.config.num_sc_per_sh =
2068 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2069 			adev->gfx.config.num_packer_per_sc =
2070 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2071 		}
2072 
2073 parse_soc_bounding_box:
2074 		/*
		 * The SoC bounding box info is not integrated in the discovery
		 * table, so it always needs to be parsed from the gpu_info
		 * firmware when required.
2077 		 */
2078 		if (hdr->version_minor == 2) {
2079 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2080 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2081 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2082 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2083 		}
2084 		break;
2085 	}
2086 	default:
2087 		dev_err(adev->dev,
2088 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2089 		err = -EINVAL;
2090 		goto out;
2091 	}
2092 out:
2093 	return err;
2094 }
2095 
2096 /**
2097  * amdgpu_device_ip_early_init - run early init for hardware IPs
2098  *
2099  * @adev: amdgpu_device pointer
2100  *
 * Early initialization pass for hardware IPs.  The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run.
 * This is the first stage in initializing the asic.
2104  * Returns 0 on success, negative error code on failure.
2105  */
2106 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2107 {
2108 	struct drm_device *dev = adev_to_drm(adev);
2109 	struct pci_dev *parent;
2110 	int i, r;
2111 	bool total;
2112 
2113 	amdgpu_device_enable_virtual_display(adev);
2114 
2115 	if (amdgpu_sriov_vf(adev)) {
2116 		r = amdgpu_virt_request_full_gpu(adev, true);
2117 		if (r)
2118 			return r;
2119 	}
2120 
2121 	switch (adev->asic_type) {
2122 #ifdef CONFIG_DRM_AMDGPU_SI
2123 	case CHIP_VERDE:
2124 	case CHIP_TAHITI:
2125 	case CHIP_PITCAIRN:
2126 	case CHIP_OLAND:
2127 	case CHIP_HAINAN:
2128 		adev->family = AMDGPU_FAMILY_SI;
2129 		r = si_set_ip_blocks(adev);
2130 		if (r)
2131 			return r;
2132 		break;
2133 #endif
2134 #ifdef CONFIG_DRM_AMDGPU_CIK
2135 	case CHIP_BONAIRE:
2136 	case CHIP_HAWAII:
2137 	case CHIP_KAVERI:
2138 	case CHIP_KABINI:
2139 	case CHIP_MULLINS:
2140 		if (adev->flags & AMD_IS_APU)
2141 			adev->family = AMDGPU_FAMILY_KV;
2142 		else
2143 			adev->family = AMDGPU_FAMILY_CI;
2144 
2145 		r = cik_set_ip_blocks(adev);
2146 		if (r)
2147 			return r;
2148 		break;
2149 #endif
2150 	case CHIP_TOPAZ:
2151 	case CHIP_TONGA:
2152 	case CHIP_FIJI:
2153 	case CHIP_POLARIS10:
2154 	case CHIP_POLARIS11:
2155 	case CHIP_POLARIS12:
2156 	case CHIP_VEGAM:
2157 	case CHIP_CARRIZO:
2158 	case CHIP_STONEY:
2159 		if (adev->flags & AMD_IS_APU)
2160 			adev->family = AMDGPU_FAMILY_CZ;
2161 		else
2162 			adev->family = AMDGPU_FAMILY_VI;
2163 
2164 		r = vi_set_ip_blocks(adev);
2165 		if (r)
2166 			return r;
2167 		break;
2168 	default:
2169 		r = amdgpu_discovery_set_ip_blocks(adev);
2170 		if (r)
2171 			return r;
2172 		break;
2173 	}
2174 
2175 	if (amdgpu_has_atpx() &&
2176 	    (amdgpu_is_atpx_hybrid() ||
2177 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2178 	    ((adev->flags & AMD_IS_APU) == 0) &&
2179 	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2180 		adev->flags |= AMD_IS_PX;
2181 
2182 	if (!(adev->flags & AMD_IS_APU)) {
2183 		parent = pci_upstream_bridge(adev->pdev);
2184 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2185 	}
2186 
2187 
2188 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2189 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2190 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2191 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2192 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2193 
2194 	total = true;
2195 	for (i = 0; i < adev->num_ip_blocks; i++) {
2196 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2197 			DRM_ERROR("disabled ip block: %d <%s>\n",
2198 				  i, adev->ip_blocks[i].version->funcs->name);
2199 			adev->ip_blocks[i].status.valid = false;
2200 		} else {
2201 			if (adev->ip_blocks[i].version->funcs->early_init) {
2202 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2203 				if (r == -ENOENT) {
2204 					adev->ip_blocks[i].status.valid = false;
2205 				} else if (r) {
2206 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2207 						  adev->ip_blocks[i].version->funcs->name, r);
2208 					total = false;
2209 				} else {
2210 					adev->ip_blocks[i].status.valid = true;
2211 				}
2212 			} else {
2213 				adev->ip_blocks[i].status.valid = true;
2214 			}
2215 		}
2216 		/* get the vbios after the asic_funcs are set up */
2217 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2218 			r = amdgpu_device_parse_gpu_info_fw(adev);
2219 			if (r)
2220 				return r;
2221 
2222 			/* Read BIOS */
2223 			if (!amdgpu_get_bios(adev))
2224 				return -EINVAL;
2225 
2226 			r = amdgpu_atombios_init(adev);
2227 			if (r) {
2228 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2229 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2230 				return r;
2231 			}
2232 
			/* get pf2vf msg info at its earliest time */
2234 			if (amdgpu_sriov_vf(adev))
2235 				amdgpu_virt_init_data_exchange(adev);
2236 
2237 		}
2238 	}
2239 	if (!total)
2240 		return -ENODEV;
2241 
2242 	amdgpu_amdkfd_device_probe(adev);
2243 	adev->cg_flags &= amdgpu_cg_mask;
2244 	adev->pg_flags &= amdgpu_pg_mask;
2245 
2246 	return 0;
2247 }
2248 
2249 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2250 {
2251 	int i, r;
2252 
2253 	for (i = 0; i < adev->num_ip_blocks; i++) {
2254 		if (!adev->ip_blocks[i].status.sw)
2255 			continue;
2256 		if (adev->ip_blocks[i].status.hw)
2257 			continue;
2258 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2259 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2260 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2261 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2262 			if (r) {
2263 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2264 					  adev->ip_blocks[i].version->funcs->name, r);
2265 				return r;
2266 			}
2267 			adev->ip_blocks[i].status.hw = true;
2268 		}
2269 	}
2270 
2271 	return 0;
2272 }
2273 
2274 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2275 {
2276 	int i, r;
2277 
2278 	for (i = 0; i < adev->num_ip_blocks; i++) {
2279 		if (!adev->ip_blocks[i].status.sw)
2280 			continue;
2281 		if (adev->ip_blocks[i].status.hw)
2282 			continue;
2283 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2284 		if (r) {
2285 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2286 				  adev->ip_blocks[i].version->funcs->name, r);
2287 			return r;
2288 		}
2289 		adev->ip_blocks[i].status.hw = true;
2290 	}
2291 
2292 	return 0;
2293 }
2294 
2295 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2296 {
2297 	int r = 0;
2298 	int i;
2299 	uint32_t smu_version;
2300 
2301 	if (adev->asic_type >= CHIP_VEGA10) {
2302 		for (i = 0; i < adev->num_ip_blocks; i++) {
2303 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2304 				continue;
2305 
2306 			if (!adev->ip_blocks[i].status.sw)
2307 				continue;
2308 
			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw)
2311 				break;
2312 
2313 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2314 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2315 				if (r) {
2316 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2317 							  adev->ip_blocks[i].version->funcs->name, r);
2318 					return r;
2319 				}
2320 			} else {
2321 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2322 				if (r) {
2323 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2324 							  adev->ip_blocks[i].version->funcs->name, r);
2325 					return r;
2326 				}
2327 			}
2328 
2329 			adev->ip_blocks[i].status.hw = true;
2330 			break;
2331 		}
2332 	}
2333 
2334 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2335 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2336 
2337 	return r;
2338 }
2339 
2340 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2341 {
2342 	long timeout;
2343 	int r, i;
2344 
2345 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2346 		struct amdgpu_ring *ring = adev->rings[i];
2347 
2348 		/* No need to setup the GPU scheduler for rings that don't need it */
2349 		if (!ring || ring->no_scheduler)
2350 			continue;
2351 
2352 		switch (ring->funcs->type) {
2353 		case AMDGPU_RING_TYPE_GFX:
2354 			timeout = adev->gfx_timeout;
2355 			break;
2356 		case AMDGPU_RING_TYPE_COMPUTE:
2357 			timeout = adev->compute_timeout;
2358 			break;
2359 		case AMDGPU_RING_TYPE_SDMA:
2360 			timeout = adev->sdma_timeout;
2361 			break;
2362 		default:
2363 			timeout = adev->video_timeout;
2364 			break;
2365 		}
2366 
2367 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2368 				   ring->num_hw_submission, 0,
2369 				   timeout, adev->reset_domain->wq,
2370 				   ring->sched_score, ring->name,
2371 				   adev->dev);
2372 		if (r) {
2373 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2374 				  ring->name);
2375 			return r;
2376 		}
2377 	}
2378 
2379 	return 0;
2380 }
2381 
2382 
2383 /**
2384  * amdgpu_device_ip_init - run init for hardware IPs
2385  *
2386  * @adev: amdgpu_device pointer
2387  *
2388  * Main initialization pass for hardware IPs.  The list of all the hardware
2389  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2390  * are run.  sw_init initializes the software state associated with each IP
2391  * and hw_init initializes the hardware associated with each IP.
2392  * Returns 0 on success, negative error code on failure.
2393  */
2394 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2395 {
2396 	int i, r;
2397 
2398 	r = amdgpu_ras_init(adev);
2399 	if (r)
2400 		return r;
2401 
2402 	for (i = 0; i < adev->num_ip_blocks; i++) {
2403 		if (!adev->ip_blocks[i].status.valid)
2404 			continue;
2405 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2406 		if (r) {
2407 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2408 				  adev->ip_blocks[i].version->funcs->name, r);
2409 			goto init_failed;
2410 		}
2411 		adev->ip_blocks[i].status.sw = true;
2412 
2413 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2414 			/* need to do common hw init early so everything is set up for gmc */
2415 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2416 			if (r) {
2417 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2418 				goto init_failed;
2419 			}
2420 			adev->ip_blocks[i].status.hw = true;
2421 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2422 			/* need to do gmc hw init early so we can allocate gpu mem */
2423 			/* Try to reserve bad pages early */
2424 			if (amdgpu_sriov_vf(adev))
2425 				amdgpu_virt_exchange_data(adev);
2426 
2427 			r = amdgpu_device_mem_scratch_init(adev);
2428 			if (r) {
2429 				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2430 				goto init_failed;
2431 			}
2432 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2433 			if (r) {
2434 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2435 				goto init_failed;
2436 			}
2437 			r = amdgpu_device_wb_init(adev);
2438 			if (r) {
2439 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2440 				goto init_failed;
2441 			}
2442 			adev->ip_blocks[i].status.hw = true;
2443 
2444 			/* right after GMC hw init, we create CSA */
2445 			if (amdgpu_mcbp) {
2446 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2447 							       AMDGPU_GEM_DOMAIN_VRAM |
2448 							       AMDGPU_GEM_DOMAIN_GTT,
2449 							       AMDGPU_CSA_SIZE);
2450 				if (r) {
2451 					DRM_ERROR("allocate CSA failed %d\n", r);
2452 					goto init_failed;
2453 				}
2454 			}
2455 		}
2456 	}
2457 
2458 	if (amdgpu_sriov_vf(adev))
2459 		amdgpu_virt_init_data_exchange(adev);
2460 
2461 	r = amdgpu_ib_pool_init(adev);
2462 	if (r) {
2463 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2464 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2465 		goto init_failed;
2466 	}
2467 
	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init is complete */
2469 	if (r)
2470 		goto init_failed;
2471 
2472 	r = amdgpu_device_ip_hw_init_phase1(adev);
2473 	if (r)
2474 		goto init_failed;
2475 
2476 	r = amdgpu_device_fw_loading(adev);
2477 	if (r)
2478 		goto init_failed;
2479 
2480 	r = amdgpu_device_ip_hw_init_phase2(adev);
2481 	if (r)
2482 		goto init_failed;
2483 
2484 	/*
	 * Retired pages will be loaded from eeprom and reserved here.
	 * This should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 *
	 * amdgpu_ras_recovery_init may fail, but the upper level only cares
	 * about failures caused by a bad GPU state, and stops the amdgpu
	 * init process accordingly.  For other failure cases it still
	 * releases all the resources and prints an error message rather
	 * than returning a negative value to the upper level.
	 *
	 * Note: theoretically, this should be called before all vram
	 * allocations to protect retired pages from being abused.
2498 	 */
2499 	r = amdgpu_ras_recovery_init(adev);
2500 	if (r)
2501 		goto init_failed;
2502 
2503 	/**
2504 	 * In case of XGMI grab extra reference for reset domain for this device
2505 	 */
2506 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2507 		if (amdgpu_xgmi_add_device(adev) == 0) {
2508 			if (!amdgpu_sriov_vf(adev)) {
2509 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2510 
2511 				if (WARN_ON(!hive)) {
2512 					r = -ENOENT;
2513 					goto init_failed;
2514 				}
2515 
2516 				if (!hive->reset_domain ||
2517 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2518 					r = -ENOENT;
2519 					amdgpu_put_xgmi_hive(hive);
2520 					goto init_failed;
2521 				}
2522 
2523 				/* Drop the early temporary reset domain we created for device */
2524 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2525 				adev->reset_domain = hive->reset_domain;
2526 				amdgpu_put_xgmi_hive(hive);
2527 			}
2528 		}
2529 	}
2530 
2531 	r = amdgpu_device_init_schedulers(adev);
2532 	if (r)
2533 		goto init_failed;
2534 
	/* Don't init kfd if the whole hive needs to be reset during init */
2536 	if (!adev->gmc.xgmi.pending_reset)
2537 		amdgpu_amdkfd_device_init(adev);
2538 
2539 	amdgpu_fru_get_product_info(adev);
2540 
2541 init_failed:
2542 	if (amdgpu_sriov_vf(adev))
2543 		amdgpu_virt_release_full_gpu(adev, true);
2544 
2545 	return r;
2546 }
2547 
2548 /**
2549  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2550  *
2551  * @adev: amdgpu_device pointer
2552  *
 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
 * this function before a GPU reset.  If the value is retained after the
 * reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2556  */
2557 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2558 {
2559 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2560 }
2561 
2562 /**
2563  * amdgpu_device_check_vram_lost - check if vram is valid
2564  *
2565  * @adev: amdgpu_device pointer
2566  *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM have been lost or not.
 * Returns true if vram is lost, false if not.
2571  */
2572 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2573 {
2574 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2575 			AMDGPU_RESET_MAGIC_NUM))
2576 		return true;
2577 
2578 	if (!amdgpu_in_reset(adev))
2579 		return false;
2580 
2581 	/*
2582 	 * For all ASICs with baco/mode1 reset, the VRAM is
2583 	 * always assumed to be lost.
2584 	 */
2585 	switch (amdgpu_asic_reset_method(adev)) {
2586 	case AMD_RESET_METHOD_BACO:
2587 	case AMD_RESET_METHOD_MODE1:
2588 		return true;
2589 	default:
2590 		return false;
2591 	}
2592 }
2593 
2594 /**
2595  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2596  *
2597  * @adev: amdgpu_device pointer
2598  * @state: clockgating state (gate or ungate)
2599  *
2600  * The list of all the hardware IPs that make up the asic is walked and the
2601  * set_clockgating_state callbacks are run.
 * During the late init pass this enables clockgating for the hardware IPs;
 * during fini or suspend it disables clockgating.
 * Returns 0 on success, negative error code on failure.
 */
2607 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2608 			       enum amd_clockgating_state state)
2609 {
2610 	int i, j, r;
2611 
2612 	if (amdgpu_emu_mode == 1)
2613 		return 0;
2614 
2615 	for (j = 0; j < adev->num_ip_blocks; j++) {
2616 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2617 		if (!adev->ip_blocks[i].status.late_initialized)
2618 			continue;
2619 		/* skip CG for GFX, SDMA on S0ix */
2620 		if (adev->in_s0ix &&
2621 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2622 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2623 			continue;
		/* skip CG for VCE/UVD/VCN/JPEG, they're handled specially */
2625 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2626 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2627 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2628 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2629 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2630 			/* enable clockgating to save power */
2631 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2632 										     state);
2633 			if (r) {
				DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2635 					  adev->ip_blocks[i].version->funcs->name, r);
2636 				return r;
2637 			}
2638 		}
2639 	}
2640 
2641 	return 0;
2642 }
2643 
2644 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2645 			       enum amd_powergating_state state)
2646 {
2647 	int i, j, r;
2648 
2649 	if (amdgpu_emu_mode == 1)
2650 		return 0;
2651 
2652 	for (j = 0; j < adev->num_ip_blocks; j++) {
2653 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2654 		if (!adev->ip_blocks[i].status.late_initialized)
2655 			continue;
2656 		/* skip PG for GFX, SDMA on S0ix */
2657 		if (adev->in_s0ix &&
2658 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2659 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2660 			continue;
		/* skip PG for VCE/UVD/VCN/JPEG, they're handled specially */
2662 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2663 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2664 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2665 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2666 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2667 			/* enable powergating to save power */
2668 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2669 											state);
2670 			if (r) {
				DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2672 					  adev->ip_blocks[i].version->funcs->name, r);
2673 				return r;
2674 			}
2675 		}
2676 	}
2677 	return 0;
2678 }
2679 
2680 static int amdgpu_device_enable_mgpu_fan_boost(void)
2681 {
2682 	struct amdgpu_gpu_instance *gpu_ins;
2683 	struct amdgpu_device *adev;
2684 	int i, ret = 0;
2685 
2686 	mutex_lock(&mgpu_info.mutex);
2687 
2688 	/*
2689 	 * MGPU fan boost feature should be enabled
2690 	 * only when there are two or more dGPUs in
2691 	 * the system
2692 	 */
2693 	if (mgpu_info.num_dgpu < 2)
2694 		goto out;
2695 
2696 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2697 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2698 		adev = gpu_ins->adev;
2699 		if (!(adev->flags & AMD_IS_APU) &&
2700 		    !gpu_ins->mgpu_fan_enabled) {
2701 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2702 			if (ret)
2703 				break;
2704 
2705 			gpu_ins->mgpu_fan_enabled = 1;
2706 		}
2707 	}
2708 
2709 out:
2710 	mutex_unlock(&mgpu_info.mutex);
2711 
2712 	return ret;
2713 }
2714 
2715 /**
2716  * amdgpu_device_ip_late_init - run late init for hardware IPs
2717  *
2718  * @adev: amdgpu_device pointer
2719  *
2720  * Late initialization pass for hardware IPs.  The list of all the hardware
2721  * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of the IPs have been initialized, or anything that needs
 * to happen late in the init process.
2725  * Returns 0 on success, negative error code on failure.
2726  */
2727 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2728 {
2729 	struct amdgpu_gpu_instance *gpu_instance;
2730 	int i = 0, r;
2731 
2732 	for (i = 0; i < adev->num_ip_blocks; i++) {
2733 		if (!adev->ip_blocks[i].status.hw)
2734 			continue;
2735 		if (adev->ip_blocks[i].version->funcs->late_init) {
2736 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2737 			if (r) {
2738 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2739 					  adev->ip_blocks[i].version->funcs->name, r);
2740 				return r;
2741 			}
2742 		}
2743 		adev->ip_blocks[i].status.late_initialized = true;
2744 	}
2745 
2746 	r = amdgpu_ras_late_init(adev);
2747 	if (r) {
		DRM_ERROR("amdgpu_ras_late_init failed %d\n", r);
2749 		return r;
2750 	}
2751 
2752 	amdgpu_ras_set_error_query_ready(adev, true);
2753 
2754 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2755 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2756 
2757 	amdgpu_device_fill_reset_magic(adev);
2758 
2759 	r = amdgpu_device_enable_mgpu_fan_boost();
2760 	if (r)
2761 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2762 
	/* For passthrough configurations on arcturus and aldebaran, enable special handling of SBR */
	if (amdgpu_passthrough(adev) &&
	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
	     adev->asic_type == CHIP_ALDEBARAN))
2766 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2767 
2768 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2769 		mutex_lock(&mgpu_info.mutex);
2770 
2771 		/*
2772 		 * Reset device p-state to low as this was booted with high.
2773 		 *
2774 		 * This should be performed only after all devices from the same
2775 		 * hive get initialized.
2776 		 *
		 * However, the number of devices in a hive is not known in
		 * advance; it is counted one by one as the devices initialize.
		 *
		 * So we wait until all XGMI interlinked devices are initialized.
		 * This may bring some delay as those devices may come from
		 * different hives, but that should be OK.
2783 		 */
2784 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2785 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2786 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2787 				if (gpu_instance->adev->flags & AMD_IS_APU)
2788 					continue;
2789 
2790 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2791 						AMDGPU_XGMI_PSTATE_MIN);
2792 				if (r) {
2793 					DRM_ERROR("pstate setting failed (%d).\n", r);
2794 					break;
2795 				}
2796 			}
2797 		}
2798 
2799 		mutex_unlock(&mgpu_info.mutex);
2800 	}
2801 
2802 	return 0;
2803 }
2804 
2805 /**
2806  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2807  *
2808  * @adev: amdgpu_device pointer
2809  *
 * For ASICs that need to disable the SMC first.
2811  */
2812 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2813 {
2814 	int i, r;
2815 
2816 	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2817 		return;
2818 
2819 	for (i = 0; i < adev->num_ip_blocks; i++) {
2820 		if (!adev->ip_blocks[i].status.hw)
2821 			continue;
2822 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2823 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2824 			/* XXX handle errors */
2825 			if (r) {
2826 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2827 					  adev->ip_blocks[i].version->funcs->name, r);
2828 			}
2829 			adev->ip_blocks[i].status.hw = false;
2830 			break;
2831 		}
2832 	}
2833 }
2834 
2835 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2836 {
2837 	int i, r;
2838 
2839 	for (i = 0; i < adev->num_ip_blocks; i++) {
2840 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2841 			continue;
2842 
2843 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2844 		if (r) {
2845 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2846 				  adev->ip_blocks[i].version->funcs->name, r);
2847 		}
2848 	}
2849 
2850 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2851 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2852 
2853 	amdgpu_amdkfd_suspend(adev, false);
2854 
	/* Workaround for ASICs that need to disable the SMC first */
2856 	amdgpu_device_smu_fini_early(adev);
2857 
2858 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2859 		if (!adev->ip_blocks[i].status.hw)
2860 			continue;
2861 
2862 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2863 		/* XXX handle errors */
2864 		if (r) {
2865 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2866 				  adev->ip_blocks[i].version->funcs->name, r);
2867 		}
2868 
2869 		adev->ip_blocks[i].status.hw = false;
2870 	}
2871 
2872 	if (amdgpu_sriov_vf(adev)) {
2873 		if (amdgpu_virt_release_full_gpu(adev, false))
2874 			DRM_ERROR("failed to release exclusive mode on fini\n");
2875 	}
2876 
2877 	return 0;
2878 }
2879 
2880 /**
2881  * amdgpu_device_ip_fini - run fini for hardware IPs
2882  *
2883  * @adev: amdgpu_device pointer
2884  *
2885  * Main teardown pass for hardware IPs.  The list of all the hardware
2886  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2887  * are run.  hw_fini tears down the hardware associated with each IP
2888  * and sw_fini tears down any software state associated with each IP.
2889  * Returns 0 on success, negative error code on failure.
2890  */
2891 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2892 {
2893 	int i, r;
2894 
2895 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2896 		amdgpu_virt_release_ras_err_handler_data(adev);
2897 
2898 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2899 		amdgpu_xgmi_remove_device(adev);
2900 
2901 	amdgpu_amdkfd_device_fini_sw(adev);
2902 
2903 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2904 		if (!adev->ip_blocks[i].status.sw)
2905 			continue;
2906 
2907 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2908 			amdgpu_ucode_free_bo(adev);
2909 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2910 			amdgpu_device_wb_fini(adev);
2911 			amdgpu_device_mem_scratch_fini(adev);
2912 			amdgpu_ib_pool_fini(adev);
2913 		}
2914 
2915 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2916 		/* XXX handle errors */
2917 		if (r) {
2918 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2919 				  adev->ip_blocks[i].version->funcs->name, r);
2920 		}
2921 		adev->ip_blocks[i].status.sw = false;
2922 		adev->ip_blocks[i].status.valid = false;
2923 	}
2924 
2925 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2926 		if (!adev->ip_blocks[i].status.late_initialized)
2927 			continue;
2928 		if (adev->ip_blocks[i].version->funcs->late_fini)
2929 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2930 		adev->ip_blocks[i].status.late_initialized = false;
2931 	}
2932 
2933 	amdgpu_ras_fini(adev);
2934 
2935 	return 0;
2936 }
2937 
2938 /**
2939  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2940  *
2941  * @work: work_struct.
2942  */
2943 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2944 {
2945 	struct amdgpu_device *adev =
2946 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2947 	int r;
2948 
2949 	r = amdgpu_ib_ring_tests(adev);
2950 	if (r)
2951 		DRM_ERROR("ib ring test failed (%d).\n", r);
2952 }
2953 
2954 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2955 {
2956 	struct amdgpu_device *adev =
2957 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2958 
2959 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2960 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2961 
2962 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2963 		adev->gfx.gfx_off_state = true;
2964 }
2965 
2966 /**
2967  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2968  *
2969  * @adev: amdgpu_device pointer
2970  *
2971  * Main suspend function for hardware IPs.  The list of all the hardware
2972  * IPs that make up the asic is walked, clockgating is disabled and the
2973  * suspend callbacks are run.  suspend puts the hardware and software state
2974  * in each IP into a state suitable for suspend.
2975  * Returns 0 on success, negative error code on failure.
2976  */
2977 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2978 {
2979 	int i, r;
2980 
2981 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2982 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2983 
2984 	/*
	 * Per the PMFW team's suggestion, the driver needs to handle gfxoff
	 * and df cstate feature disablement for gpu reset (e.g. Mode1Reset)
	 * scenarios.  Add the missing df cstate disablement here.
2988 	 */
2989 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2990 		dev_warn(adev->dev, "Failed to disallow df cstate");
2991 
2992 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2993 		if (!adev->ip_blocks[i].status.valid)
2994 			continue;
2995 
2996 		/* displays are handled separately */
2997 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2998 			continue;
2999 
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
3003 		if (r) {
3004 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3005 				  adev->ip_blocks[i].version->funcs->name, r);
3006 			return r;
3007 		}
3008 
3009 		adev->ip_blocks[i].status.hw = false;
3010 	}
3011 
3012 	return 0;
3013 }
3014 
3015 /**
3016  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3017  *
3018  * @adev: amdgpu_device pointer
3019  *
3020  * Main suspend function for hardware IPs.  The list of all the hardware
3021  * IPs that make up the asic is walked, clockgating is disabled and the
3022  * suspend callbacks are run.  suspend puts the hardware and software state
3023  * in each IP into a state suitable for suspend.
3024  * Returns 0 on success, negative error code on failure.
3025  */
3026 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3027 {
3028 	int i, r;
3029 
3030 	if (adev->in_s0ix)
3031 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3032 
3033 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3034 		if (!adev->ip_blocks[i].status.valid)
3035 			continue;
3036 		/* displays are handled in phase1 */
3037 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3038 			continue;
3039 		/* PSP lost connection when err_event_athub occurs */
3040 		if (amdgpu_ras_intr_triggered() &&
3041 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3042 			adev->ip_blocks[i].status.hw = false;
3043 			continue;
3044 		}
3045 
		/* skip unnecessary suspend for IPs that have not been initialized yet */
3047 		if (adev->gmc.xgmi.pending_reset &&
3048 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3049 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3050 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3051 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3052 			adev->ip_blocks[i].status.hw = false;
3053 			continue;
3054 		}
3055 
3056 		/* skip suspend of gfx/mes and psp for S0ix
3057 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3058 		 * like at runtime. PSP is also part of the always on hardware
3059 		 * so no need to suspend it.
3060 		 */
3061 		if (adev->in_s0ix &&
3062 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3063 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3064 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3065 			continue;
3066 
3067 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3068 		if (adev->in_s0ix &&
3069 		    (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3070 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3071 			continue;
3072 
		/* During cold boot, swPSP provides the IMU and RLC FW binaries
		 * to the TOS.  These live in the TMR and are expected to be
		 * reused by PSP-TOS to reload from that location; RLC autoload
		 * also gets triggered from there based on the PMFW -> PSP
		 * message during the re-init sequence.  Therefore, psp suspend
		 * & resume should be skipped to avoid destroying the TMR and
		 * reloading FWs again on IMU-enabled APU ASICs.
3079 		 */
3080 		if (amdgpu_in_reset(adev) &&
3081 		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3082 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3083 			continue;
3084 
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
3088 		if (r) {
3089 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3090 				  adev->ip_blocks[i].version->funcs->name, r);
3091 		}
3092 		adev->ip_blocks[i].status.hw = false;
3093 		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
3095 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3096 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3097 				if (r) {
3098 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3099 							adev->mp1_state, r);
3100 					return r;
3101 				}
3102 			}
3103 		}
3104 	}
3105 
3106 	return 0;
3107 }
3108 
3109 /**
3110  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3111  *
3112  * @adev: amdgpu_device pointer
3113  *
3114  * Main suspend function for hardware IPs.  The list of all the hardware
3115  * IPs that make up the asic is walked, clockgating is disabled and the
3116  * suspend callbacks are run.  suspend puts the hardware and software state
3117  * in each IP into a state suitable for suspend.
3118  * Returns 0 on success, negative error code on failure.
3119  */
3120 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3121 {
3122 	int r;
3123 
3124 	if (amdgpu_sriov_vf(adev)) {
3125 		amdgpu_virt_fini_data_exchange(adev);
3126 		amdgpu_virt_request_full_gpu(adev, false);
3127 	}
3128 
3129 	r = amdgpu_device_ip_suspend_phase1(adev);
3130 	if (r)
3131 		return r;
3132 	r = amdgpu_device_ip_suspend_phase2(adev);
3133 
3134 	if (amdgpu_sriov_vf(adev))
3135 		amdgpu_virt_release_full_gpu(adev, false);
3136 
3137 	return r;
3138 }
3139 
3140 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3141 {
3142 	int i, r;
3143 
3144 	static enum amd_ip_block_type ip_order[] = {
3145 		AMD_IP_BLOCK_TYPE_COMMON,
3146 		AMD_IP_BLOCK_TYPE_GMC,
3147 		AMD_IP_BLOCK_TYPE_PSP,
3148 		AMD_IP_BLOCK_TYPE_IH,
3149 	};
3150 
3151 	for (i = 0; i < adev->num_ip_blocks; i++) {
3152 		int j;
3153 		struct amdgpu_ip_block *block;
3154 
3155 		block = &adev->ip_blocks[i];
3156 		block->status.hw = false;
3157 
3158 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3159 
3160 			if (block->version->type != ip_order[j] ||
3161 				!block->status.valid)
3162 				continue;
3163 
3164 			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3166 			if (r)
3167 				return r;
3168 			block->status.hw = true;
3169 		}
3170 	}
3171 
3172 	return 0;
3173 }
3174 
3175 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3176 {
3177 	int i, r;
3178 
3179 	static enum amd_ip_block_type ip_order[] = {
3180 		AMD_IP_BLOCK_TYPE_SMC,
3181 		AMD_IP_BLOCK_TYPE_DCE,
3182 		AMD_IP_BLOCK_TYPE_GFX,
3183 		AMD_IP_BLOCK_TYPE_SDMA,
3184 		AMD_IP_BLOCK_TYPE_MES,
3185 		AMD_IP_BLOCK_TYPE_UVD,
3186 		AMD_IP_BLOCK_TYPE_VCE,
3187 		AMD_IP_BLOCK_TYPE_VCN,
3188 		AMD_IP_BLOCK_TYPE_JPEG
3189 	};
3190 
3191 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3192 		int j;
3193 		struct amdgpu_ip_block *block;
3194 
3195 		for (j = 0; j < adev->num_ip_blocks; j++) {
3196 			block = &adev->ip_blocks[j];
3197 
3198 			if (block->version->type != ip_order[i] ||
3199 				!block->status.valid ||
3200 				block->status.hw)
3201 				continue;
3202 
3203 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3204 				r = block->version->funcs->resume(adev);
3205 			else
3206 				r = block->version->funcs->hw_init(adev);
3207 
			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3209 			if (r)
3210 				return r;
3211 			block->status.hw = true;
3212 		}
3213 	}
3214 
3215 	return 0;
3216 }
3217 
3218 /**
3219  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3220  *
3221  * @adev: amdgpu_device pointer
3222  *
3223  * First resume function for hardware IPs.  The list of all the hardware
3224  * IPs that make up the asic is walked and the resume callbacks are run for
3225  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3226  * after a suspend and updates the software state as necessary.  This
3227  * function is also used for restoring the GPU after a GPU reset.
3228  * Returns 0 on success, negative error code on failure.
3229  */
3230 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3231 {
3232 	int i, r;
3233 
3234 	for (i = 0; i < adev->num_ip_blocks; i++) {
3235 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3236 			continue;
3237 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3238 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3239 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3240 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3241 
3242 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3243 			if (r) {
3244 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3245 					  adev->ip_blocks[i].version->funcs->name, r);
3246 				return r;
3247 			}
3248 			adev->ip_blocks[i].status.hw = true;
3249 		}
3250 	}
3251 
3252 	return 0;
3253 }
3254 
3255 /**
3256  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3257  *
3258  * @adev: amdgpu_device pointer
3259  *
 * Second resume function for hardware IPs.  The list of all the hardware
3261  * IPs that make up the asic is walked and the resume callbacks are run for
3262  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3263  * functional state after a suspend and updates the software state as
3264  * necessary.  This function is also used for restoring the GPU after a GPU
3265  * reset.
3266  * Returns 0 on success, negative error code on failure.
3267  */
3268 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3269 {
3270 	int i, r;
3271 
3272 	for (i = 0; i < adev->num_ip_blocks; i++) {
3273 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3274 			continue;
3275 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3276 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3277 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3278 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3279 			continue;
3280 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3281 		if (r) {
3282 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3283 				  adev->ip_blocks[i].version->funcs->name, r);
3284 			return r;
3285 		}
3286 		adev->ip_blocks[i].status.hw = true;
3287 	}
3288 
3289 	return 0;
3290 }
3291 
3292 /**
3293  * amdgpu_device_ip_resume - run resume for hardware IPs
3294  *
3295  * @adev: amdgpu_device pointer
3296  *
3297  * Main resume function for hardware IPs.  The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them.  In this case (S3/S4) they are
3301  * run sequentially.
3302  * Returns 0 on success, negative error code on failure.
3303  */
3304 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3305 {
3306 	int r;
3307 
3308 	if (!adev->in_s0ix) {
3309 		r = amdgpu_amdkfd_resume_iommu(adev);
3310 		if (r)
3311 			return r;
3312 	}
3313 
3314 	r = amdgpu_device_ip_resume_phase1(adev);
3315 	if (r)
3316 		return r;
3317 
3318 	r = amdgpu_device_fw_loading(adev);
3319 	if (r)
3320 		return r;
3321 
3322 	r = amdgpu_device_ip_resume_phase2(adev);
3323 
3324 	return r;
3325 }
3326 
3327 /**
3328  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3329  *
3330  * @adev: amdgpu_device pointer
3331  *
3332  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3333  */
3334 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3335 {
3336 	if (amdgpu_sriov_vf(adev)) {
3337 		if (adev->is_atom_fw) {
3338 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3339 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3340 		} else {
3341 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3342 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3343 		}
3344 
3345 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3346 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3347 	}
3348 }
3349 
3350 /**
3351  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3352  *
3353  * @asic_type: AMD asic type
3354  *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
3356  * returns true if DC has support, false if not.
3357  */
3358 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3359 {
3360 	switch (asic_type) {
3361 #ifdef CONFIG_DRM_AMDGPU_SI
3362 	case CHIP_HAINAN:
3363 #endif
3364 	case CHIP_TOPAZ:
3365 		/* chips with no display hardware */
3366 		return false;
3367 #if defined(CONFIG_DRM_AMD_DC)
3368 	case CHIP_TAHITI:
3369 	case CHIP_PITCAIRN:
3370 	case CHIP_VERDE:
3371 	case CHIP_OLAND:
3372 		/*
3373 		 * We have systems in the wild with these ASICs that require
3374 		 * LVDS and VGA support which is not supported with DC.
3375 		 *
3376 		 * Fallback to the non-DC driver here by default so as not to
3377 		 * cause regressions.
3378 		 */
3379 #if defined(CONFIG_DRM_AMD_DC_SI)
3380 		return amdgpu_dc > 0;
3381 #else
3382 		return false;
3383 #endif
3384 	case CHIP_BONAIRE:
3385 	case CHIP_KAVERI:
3386 	case CHIP_KABINI:
3387 	case CHIP_MULLINS:
3388 		/*
3389 		 * We have systems in the wild with these ASICs that require
3390 		 * VGA support which is not supported with DC.
3391 		 *
3392 		 * Fallback to the non-DC driver here by default so as not to
3393 		 * cause regressions.
3394 		 */
3395 		return amdgpu_dc > 0;
3396 	default:
3397 		return amdgpu_dc != 0;
3398 #else
3399 	default:
3400 		if (amdgpu_dc > 0)
3401 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3402 					 "but isn't supported by ASIC, ignoring\n");
3403 		return false;
3404 #endif
3405 	}
3406 }
3407 
3408 /**
3409  * amdgpu_device_has_dc_support - check if dc is supported
3410  *
3411  * @adev: amdgpu_device pointer
3412  *
3413  * Returns true for supported, false for not supported
3414  */
3415 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3416 {
3417 	if (adev->enable_virtual_display ||
3418 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3419 		return false;
3420 
3421 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3422 }
3423 
3424 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3425 {
3426 	struct amdgpu_device *adev =
3427 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3428 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3429 
3430 	/* It's a bug to not have a hive within this function */
3431 	if (WARN_ON(!hive))
3432 		return;
3433 
3434 	/*
3435 	 * Use task barrier to synchronize all xgmi reset works across the
3436 	 * hive. task_barrier_enter and task_barrier_exit will block
3437 	 * until all the threads running the xgmi reset works reach
3438 	 * those points. task_barrier_full will do both blocks.
3439 	 */
3440 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3441 
3442 		task_barrier_enter(&hive->tb);
3443 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3444 
3445 		if (adev->asic_reset_res)
3446 			goto fail;
3447 
3448 		task_barrier_exit(&hive->tb);
3449 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3450 
3451 		if (adev->asic_reset_res)
3452 			goto fail;
3453 
3454 		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3455 		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3456 			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3457 	} else {
3458 
3459 		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
3461 	}
3462 
3463 fail:
3464 	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error %d for drm dev %s\n",
3466 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3467 	amdgpu_put_xgmi_hive(hive);
3468 }
3469 
3470 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3471 {
3472 	char *input = amdgpu_lockup_timeout;
3473 	char *timeout_setting = NULL;
3474 	int index = 0;
3475 	long timeout;
3476 	int ret = 0;
3477 
3478 	/*
	 * By default the timeout for non-compute jobs is 10000 ms
	 * and 60000 ms for compute jobs.
	 * In SR-IOV mode the compute timeout is 60000 ms when one VF
	 * owns all of the resources (pp_one_vf), otherwise 10000 ms.
3483 	 */
3484 	adev->gfx_timeout = msecs_to_jiffies(10000);
3485 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3486 	if (amdgpu_sriov_vf(adev))
3487 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3488 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3489 	else
3490 		adev->compute_timeout =  msecs_to_jiffies(60000);
3491 
3492 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3493 		while ((timeout_setting = strsep(&input, ",")) &&
3494 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3495 			ret = kstrtol(timeout_setting, 0, &timeout);
3496 			if (ret)
3497 				return ret;
3498 
3499 			if (timeout == 0) {
3500 				index++;
3501 				continue;
3502 			} else if (timeout < 0) {
3503 				timeout = MAX_SCHEDULE_TIMEOUT;
3504 				dev_warn(adev->dev, "lockup timeout disabled");
3505 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3506 			} else {
3507 				timeout = msecs_to_jiffies(timeout);
3508 			}
3509 
3510 			switch (index++) {
3511 			case 0:
3512 				adev->gfx_timeout = timeout;
3513 				break;
3514 			case 1:
3515 				adev->compute_timeout = timeout;
3516 				break;
3517 			case 2:
3518 				adev->sdma_timeout = timeout;
3519 				break;
3520 			case 3:
3521 				adev->video_timeout = timeout;
3522 				break;
3523 			default:
3524 				break;
3525 			}
3526 		}
3527 		/*
		 * If only one value was specified, it applies to all
		 * non-compute jobs.
3530 		 */
3531 		if (index == 1) {
3532 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3533 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3534 				adev->compute_timeout = adev->gfx_timeout;
3535 		}
3536 	}
3537 
3538 	return ret;
3539 }
3540 
3541 /**
3542  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3543  *
3544  * @adev: amdgpu_device pointer
3545  *
 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode.
3547  */
3548 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3549 {
3550 	struct iommu_domain *domain;
3551 
3552 	domain = iommu_get_domain_for_dev(adev->dev);
3553 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3554 		adev->ram_is_direct_mapped = true;
3555 }
3556 
3557 static const struct attribute *amdgpu_dev_attributes[] = {
3558 	&dev_attr_product_name.attr,
3559 	&dev_attr_product_number.attr,
3560 	&dev_attr_serial_number.attr,
3561 	&dev_attr_pcie_replay_count.attr,
3562 	NULL
3563 };
3564 
3565 /**
3566  * amdgpu_device_init - initialize the driver
3567  *
3568  * @adev: amdgpu_device pointer
3569  * @flags: driver flags
3570  *
3571  * Initializes the driver info and hw (all asics).
3572  * Returns 0 for success or an error on failure.
3573  * Called at driver startup.
3574  */
3575 int amdgpu_device_init(struct amdgpu_device *adev,
3576 		       uint32_t flags)
3577 {
3578 	struct drm_device *ddev = adev_to_drm(adev);
3579 	struct pci_dev *pdev = adev->pdev;
3580 	int r, i;
3581 	bool px = false;
3582 	u32 max_MBps;
3583 
3584 	adev->shutdown = false;
3585 	adev->flags = flags;
3586 
3587 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3588 		adev->asic_type = amdgpu_force_asic_type;
3589 	else
3590 		adev->asic_type = flags & AMD_ASIC_MASK;
3591 
3592 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3593 	if (amdgpu_emu_mode == 1)
3594 		adev->usec_timeout *= 10;
3595 	adev->gmc.gart_size = 512 * 1024 * 1024;
3596 	adev->accel_working = false;
3597 	adev->num_rings = 0;
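	/* Seed gang_submit with a signaled stub fence so there is always
	 * a valid fence to wait on before the first real gang submission.
	 */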
3598 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3599 	adev->mman.buffer_funcs = NULL;
3600 	adev->mman.buffer_funcs_ring = NULL;
3601 	adev->vm_manager.vm_pte_funcs = NULL;
3602 	adev->vm_manager.vm_pte_num_scheds = 0;
3603 	adev->gmc.gmc_funcs = NULL;
3604 	adev->harvest_ip_mask = 0x0;
3605 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3606 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3607 
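	/*
	 * Point the register access callbacks at "invalid" helpers that
	 * only warn; the real ASIC-specific accessors are installed later
	 * during IP initialization.
	 */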
3608 	adev->smc_rreg = &amdgpu_invalid_rreg;
3609 	adev->smc_wreg = &amdgpu_invalid_wreg;
3610 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3611 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3612 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3613 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3614 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3615 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3616 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3617 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3618 	adev->didt_rreg = &amdgpu_invalid_rreg;
3619 	adev->didt_wreg = &amdgpu_invalid_wreg;
3620 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3621 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3622 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3623 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3624 
3625 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3626 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3627 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3628 
	/*
	 * All mutex initialization is done here so that the functions
	 * using these mutexes can be called later without locking issues.
	 */
3631 	mutex_init(&adev->firmware.mutex);
3632 	mutex_init(&adev->pm.mutex);
3633 	mutex_init(&adev->gfx.gpu_clock_mutex);
3634 	mutex_init(&adev->srbm_mutex);
3635 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3636 	mutex_init(&adev->gfx.gfx_off_mutex);
3637 	mutex_init(&adev->grbm_idx_mutex);
3638 	mutex_init(&adev->mn_lock);
3639 	mutex_init(&adev->virt.vf_errors.lock);
3640 	hash_init(adev->mn_hash);
3641 	mutex_init(&adev->psp.mutex);
3642 	mutex_init(&adev->notifier_lock);
3643 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3644 	mutex_init(&adev->benchmark_mutex);
3645 
3646 	amdgpu_device_init_apu_flags(adev);
3647 
3648 	r = amdgpu_device_check_arguments(adev);
3649 	if (r)
3650 		return r;
3651 
3652 	spin_lock_init(&adev->mmio_idx_lock);
3653 	spin_lock_init(&adev->smc_idx_lock);
3654 	spin_lock_init(&adev->pcie_idx_lock);
3655 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3656 	spin_lock_init(&adev->didt_idx_lock);
3657 	spin_lock_init(&adev->gc_cac_idx_lock);
3658 	spin_lock_init(&adev->se_cac_idx_lock);
3659 	spin_lock_init(&adev->audio_endpt_idx_lock);
3660 	spin_lock_init(&adev->mm_stats.lock);
3661 
3662 	INIT_LIST_HEAD(&adev->shadow_list);
3663 	mutex_init(&adev->shadow_list_lock);
3664 
3665 	INIT_LIST_HEAD(&adev->reset_list);
3666 
3667 	INIT_LIST_HEAD(&adev->ras_list);
3668 
3669 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3670 			  amdgpu_device_delayed_init_work_handler);
3671 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3672 			  amdgpu_device_delay_enable_gfx_off);
3673 
3674 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3675 
3676 	adev->gfx.gfx_off_req_count = 1;
3677 	adev->gfx.gfx_off_residency = 0;
3678 	adev->gfx.gfx_off_entrycount = 0;
3679 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3680 
3681 	atomic_set(&adev->throttling_logging_enabled, 1);
3682 	/*
3683 	 * If throttling continues, logging will be performed every minute
3684 	 * to avoid log flooding. "-1" is subtracted since the thermal
3685 	 * throttling interrupt comes every second. Thus, the total logging
3686 	 * interval is 59 seconds(retelimited printk interval) + 1(waiting
3687 	 * for throttling interrupt) = 60 seconds.
3688 	 */
3689 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3690 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3691 
3692 	/* Registers mapping */
3693 	/* TODO: block userspace mapping of io register */
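	/* The MMIO registers are in BAR 5 on BONAIRE and newer, BAR 2 on older ASICs */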
3694 	if (adev->asic_type >= CHIP_BONAIRE) {
3695 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3696 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3697 	} else {
3698 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3699 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3700 	}
3701 
3702 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3703 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3704 
3705 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (!adev->rmmio)
		return -ENOMEM;
3709 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3710 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3711 
3712 	amdgpu_device_get_pcie_info(adev);
3713 
3714 	if (amdgpu_mcbp)
3715 		DRM_INFO("MCBP is enabled\n");
3716 
3717 	/*
3718 	 * Reset domain needs to be present early, before XGMI hive discovered
3719 	 * (if any) and intitialized to use reset sem and in_gpu reset flag
3720 	 * early on during init and before calling to RREG32.
3721 	 */
3722 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3723 	if (!adev->reset_domain)
3724 		return -ENOMEM;
3725 
3726 	/* detect hw virtualization here */
3727 	amdgpu_detect_virtualization(adev);
3728 
3729 	r = amdgpu_device_get_job_timeout_settings(adev);
3730 	if (r) {
3731 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3732 		return r;
3733 	}
3734 
3735 	/* early init functions */
3736 	r = amdgpu_device_ip_early_init(adev);
3737 	if (r)
3738 		return r;
3739 
3740 	/* Get rid of things like offb */
3741 	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3742 	if (r)
3743 		return r;
3744 
3745 	/* Enable TMZ based on IP_VERSION */
3746 	amdgpu_gmc_tmz_set(adev);
3747 
3748 	amdgpu_gmc_noretry_set(adev);
	/* Need to get xgmi info early to decide the reset behavior */
3750 	if (adev->gmc.xgmi.supported) {
3751 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
3752 		if (r)
3753 			return r;
3754 	}
3755 
3756 	/* enable PCIE atomic ops */
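	/* Under SR-IOV the host reports atomic support through the pf2vf
	 * structure; on bare metal we ask the PCIe root port directly.
	 */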
3757 	if (amdgpu_sriov_vf(adev))
3758 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3759 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3760 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3761 	else
3762 		adev->have_atomics_support =
3763 			!pci_enable_atomic_ops_to_root(adev->pdev,
3764 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3765 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (!adev->have_atomics_support)
		dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3768 
	/* doorbell bar mapping and doorbell index init */
3770 	amdgpu_device_doorbell_init(adev);
3771 
3772 	if (amdgpu_emu_mode == 1) {
3773 		/* post the asic on emulation mode */
3774 		emu_soc_asic_init(adev);
3775 		goto fence_driver_init;
3776 	}
3777 
3778 	amdgpu_reset_init(adev);
3779 
3780 	/* detect if we are with an SRIOV vbios */
3781 	amdgpu_device_detect_sriov_bios(adev);
3782 
	/*
	 * Check if we need to reset the asic, e.g. because the driver
	 * was not cleanly unloaded previously.
	 */
3786 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3787 		if (adev->gmc.xgmi.num_physical_nodes) {
3788 			dev_info(adev->dev, "Pending hive reset.\n");
3789 			adev->gmc.xgmi.pending_reset = true;
3790 			/* Only need to init necessary block for SMU to handle the reset */
3791 			for (i = 0; i < adev->num_ip_blocks; i++) {
3792 				if (!adev->ip_blocks[i].status.valid)
3793 					continue;
3794 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3795 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3796 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3797 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3798 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3799 						adev->ip_blocks[i].version->funcs->name);
3800 					adev->ip_blocks[i].status.hw = true;
3801 				}
3802 			}
3803 		} else {
3804 			r = amdgpu_asic_reset(adev);
3805 			if (r) {
3806 				dev_err(adev->dev, "asic reset on init failed\n");
3807 				goto failed;
3808 			}
3809 		}
3810 	}
3811 
3812 	/* Post card if necessary */
3813 	if (amdgpu_device_need_post(adev)) {
3814 		if (!adev->bios) {
3815 			dev_err(adev->dev, "no vBIOS found\n");
3816 			r = -EINVAL;
3817 			goto failed;
3818 		}
3819 		DRM_INFO("GPU posting now...\n");
3820 		r = amdgpu_device_asic_init(adev);
3821 		if (r) {
3822 			dev_err(adev->dev, "gpu post error!\n");
3823 			goto failed;
3824 		}
3825 	}
3826 
3827 	if (adev->is_atom_fw) {
3828 		/* Initialize clocks */
3829 		r = amdgpu_atomfirmware_get_clock_info(adev);
3830 		if (r) {
3831 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3832 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3833 			goto failed;
3834 		}
3835 	} else {
3836 		/* Initialize clocks */
3837 		r = amdgpu_atombios_get_clock_info(adev);
3838 		if (r) {
3839 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3840 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3841 			goto failed;
3842 		}
3843 		/* init i2c buses */
3844 		if (!amdgpu_device_has_dc_support(adev))
3845 			amdgpu_atombios_i2c_init(adev);
3846 	}
3847 
3848 fence_driver_init:
3849 	/* Fence driver */
3850 	r = amdgpu_fence_driver_sw_init(adev);
3851 	if (r) {
3852 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3853 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3854 		goto failed;
3855 	}
3856 
3857 	/* init the mode config */
3858 	drm_mode_config_init(adev_to_drm(adev));
3859 
3860 	r = amdgpu_device_ip_init(adev);
3861 	if (r) {
3862 		/* failed in exclusive mode due to timeout */
3863 		if (amdgpu_sriov_vf(adev) &&
3864 		    !amdgpu_sriov_runtime(adev) &&
3865 		    amdgpu_virt_mmio_blocked(adev) &&
3866 		    !amdgpu_virt_wait_reset(adev)) {
3867 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3868 			/* Don't send request since VF is inactive. */
3869 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3870 			adev->virt.ops = NULL;
3871 			r = -EAGAIN;
3872 			goto release_ras_con;
3873 		}
3874 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3875 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3876 		goto release_ras_con;
3877 	}
3878 
3879 	amdgpu_fence_driver_hw_init(adev);
3880 
3881 	dev_info(adev->dev,
3882 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3883 			adev->gfx.config.max_shader_engines,
3884 			adev->gfx.config.max_sh_per_se,
3885 			adev->gfx.config.max_cu_per_sh,
3886 			adev->gfx.cu_info.number);
3887 
3888 	adev->accel_working = true;
3889 
3890 	amdgpu_vm_check_compute_bug(adev);
3891 
3892 	/* Initialize the buffer migration limit. */
3893 	if (amdgpu_moverate >= 0)
3894 		max_MBps = amdgpu_moverate;
3895 	else
3896 		max_MBps = 8; /* Allow 8 MB/s. */
3897 	/* Get a log2 for easy divisions. */
3898 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3899 
3900 	r = amdgpu_pm_sysfs_init(adev);
3901 	if (r)
3902 		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
3903 
3904 	r = amdgpu_ucode_sysfs_init(adev);
3905 	if (r) {
3906 		adev->ucode_sysfs_en = false;
3907 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else {
		adev->ucode_sysfs_en = true;
	}
3910 
3911 	r = amdgpu_psp_sysfs_init(adev);
3912 	if (r) {
3913 		adev->psp_sysfs_en = false;
3914 		if (!amdgpu_sriov_vf(adev))
3915 			DRM_ERROR("Creating psp sysfs failed\n");
	} else {
		adev->psp_sysfs_en = true;
	}
3918 
3919 	/*
3920 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3921 	 * Otherwise the mgpu fan boost feature will be skipped due to the
3922 	 * gpu instance is counted less.
3923 	 */
3924 	amdgpu_register_gpu_instance(adev);
3925 
3926 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3927 	 * explicit gating rather than handling it automatically.
3928 	 */
3929 	if (!adev->gmc.xgmi.pending_reset) {
3930 		r = amdgpu_device_ip_late_init(adev);
3931 		if (r) {
3932 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3933 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3934 			goto release_ras_con;
3935 		}
3936 		/* must succeed. */
3937 		amdgpu_ras_resume(adev);
3938 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3939 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3940 	}
3941 
3942 	if (amdgpu_sriov_vf(adev))
3943 		flush_delayed_work(&adev->delayed_init_work);
3944 
3945 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3946 	if (r)
3947 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3948 
	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3953 
	/* Keep the stored pci config space at hand for restore after a sudden PCI error */
3955 	if (amdgpu_device_cache_pci_state(adev->pdev))
3956 		pci_restore_state(pdev);
3957 
	/*
	 * If we have > 1 VGA cards, then disable the amdgpu VGA resources.
	 * This will fail for cards that aren't VGA class devices; just
	 * ignore it.
	 */
3961 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3962 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3963 
3964 	px = amdgpu_device_supports_px(ddev);
3965 
3966 	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
3967 				apple_gmux_detect(NULL, NULL)))
3968 		vga_switcheroo_register_client(adev->pdev,
3969 					       &amdgpu_switcheroo_ops, px);
3970 
3971 	if (px)
3972 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3973 
3974 	if (adev->gmc.xgmi.pending_reset)
3975 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3976 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3977 
3978 	amdgpu_device_check_iommu_direct_map(adev);
3979 
3980 	return 0;
3981 
3982 release_ras_con:
3983 	amdgpu_release_ras_context(adev);
3984 
3985 failed:
3986 	amdgpu_vf_error_trans_all(adev);
3987 
3988 	return r;
3989 }
3990 
3991 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3992 {
3993 
3994 	/* Clear all CPU mappings pointing to this device */
3995 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3996 
3997 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3998 	amdgpu_device_doorbell_fini(adev);
3999 
4000 	iounmap(adev->rmmio);
4001 	adev->rmmio = NULL;
4002 	if (adev->mman.aper_base_kaddr)
4003 		iounmap(adev->mman.aper_base_kaddr);
4004 	adev->mman.aper_base_kaddr = NULL;
4005 
4006 	/* Memory manager related */
4007 	if (!adev->gmc.xgmi.connected_to_cpu) {
4008 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4009 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4010 	}
4011 }
4012 
4013 /**
4014  * amdgpu_device_fini_hw - tear down the driver
4015  *
4016  * @adev: amdgpu_device pointer
4017  *
4018  * Tear down the driver info (all asics).
4019  * Called at driver shutdown.
4020  */
4021 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4022 {
4023 	dev_info(adev->dev, "amdgpu: finishing device.\n");
4024 	flush_delayed_work(&adev->delayed_init_work);
4025 	adev->shutdown = true;
4026 
	/*
	 * Make sure the IB tests have finished before entering exclusive
	 * mode, to avoid preemption during the IB tests.
	 */
4030 	if (amdgpu_sriov_vf(adev)) {
4031 		amdgpu_virt_request_full_gpu(adev, false);
4032 		amdgpu_virt_fini_data_exchange(adev);
4033 	}
4034 
4035 	/* disable all interrupts */
4036 	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
4038 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4039 			drm_helper_force_disable_all(adev_to_drm(adev));
4040 		else
4041 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4042 	}
4043 	amdgpu_fence_driver_hw_fini(adev);
4044 
4045 	if (adev->mman.initialized)
4046 		drain_workqueue(adev->mman.bdev.wq);
4047 
4048 	if (adev->pm.sysfs_initialized)
4049 		amdgpu_pm_sysfs_fini(adev);
4050 	if (adev->ucode_sysfs_en)
4051 		amdgpu_ucode_sysfs_fini(adev);
4052 	if (adev->psp_sysfs_en)
4053 		amdgpu_psp_sysfs_fini(adev);
4054 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4055 
	/* RAS features must be disabled before hw fini */
4057 	amdgpu_ras_pre_fini(adev);
4058 
4059 	amdgpu_device_ip_fini_early(adev);
4060 
4061 	amdgpu_irq_fini_hw(adev);
4062 
4063 	if (adev->mman.initialized)
4064 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4065 
4066 	amdgpu_gart_dummy_page_fini(adev);
4067 
	if (drm_dev_is_unplugged(adev_to_drm(adev)))
		amdgpu_device_unmap_mmio(adev);
}
4072 
4073 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4074 {
4075 	int idx;
4076 	bool px;
4077 
4078 	amdgpu_fence_driver_sw_fini(adev);
4079 	amdgpu_device_ip_fini(adev);
4080 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4081 	adev->accel_working = false;
4082 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4083 
4084 	amdgpu_reset_fini(adev);
4085 
4086 	/* free i2c buses */
4087 	if (!amdgpu_device_has_dc_support(adev))
4088 		amdgpu_i2c_fini(adev);
4089 
4090 	if (amdgpu_emu_mode != 1)
4091 		amdgpu_atombios_fini(adev);
4092 
4093 	kfree(adev->bios);
4094 	adev->bios = NULL;
4095 
4096 	px = amdgpu_device_supports_px(adev_to_drm(adev));
4097 
4098 	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
4099 				apple_gmux_detect(NULL, NULL)))
4100 		vga_switcheroo_unregister_client(adev->pdev);
4101 
4102 	if (px)
4103 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4104 
4105 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4106 		vga_client_unregister(adev->pdev);
4107 
4108 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4109 
4110 		iounmap(adev->rmmio);
4111 		adev->rmmio = NULL;
4112 		amdgpu_device_doorbell_fini(adev);
4113 		drm_dev_exit(idx);
4114 	}
4115 
4116 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4117 		amdgpu_pmu_fini(adev);
4118 	if (adev->mman.discovery_bin)
4119 		amdgpu_discovery_fini(adev);
4120 
4121 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4122 	adev->reset_domain = NULL;
4123 
	kfree(adev->pci_state);
}
4127 
4128 /**
4129  * amdgpu_device_evict_resources - evict device resources
4130  * @adev: amdgpu device object
4131  *
 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4133  * of the vram memory type. Mainly used for evicting device resources
4134  * at suspend time.
4135  *
4136  */
4137 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4138 {
4139 	int ret;
4140 
4141 	/* No need to evict vram on APUs for suspend to ram or s2idle */
4142 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4143 		return 0;
4144 
4145 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4146 	if (ret)
4147 		DRM_WARN("evicting device resources failed\n");
4148 	return ret;
4149 }
4150 
4151 /*
4152  * Suspend & resume.
4153  */
4154 /**
4155  * amdgpu_device_suspend - initiate device suspend
4156  *
4157  * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
4159  *
4160  * Puts the hw in the suspend state (all asics).
4161  * Returns 0 for success or an error on failure.
4162  * Called at driver suspend.
4163  */
4164 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4165 {
4166 	struct amdgpu_device *adev = drm_to_adev(dev);
4167 	int r = 0;
4168 
4169 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4170 		return 0;
4171 
4172 	adev->in_suspend = true;
4173 
4174 	/* Evict the majority of BOs before grabbing the full access */
4175 	r = amdgpu_device_evict_resources(adev);
4176 	if (r)
4177 		return r;
4178 
4179 	if (amdgpu_sriov_vf(adev)) {
4180 		amdgpu_virt_fini_data_exchange(adev);
4181 		r = amdgpu_virt_request_full_gpu(adev, false);
4182 		if (r)
4183 			return r;
4184 	}
4185 
4186 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4187 		DRM_WARN("smart shift update failed\n");
4188 
4189 	if (fbcon)
4190 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4191 
4192 	cancel_delayed_work_sync(&adev->delayed_init_work);
4193 
4194 	amdgpu_ras_suspend(adev);
4195 
4196 	amdgpu_device_ip_suspend_phase1(adev);
4197 
4198 	if (!adev->in_s0ix)
4199 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4200 
4201 	r = amdgpu_device_evict_resources(adev);
4202 	if (r)
4203 		return r;
4204 
4205 	amdgpu_fence_driver_hw_fini(adev);
4206 
4207 	amdgpu_device_ip_suspend_phase2(adev);
4208 
4209 	if (amdgpu_sriov_vf(adev))
4210 		amdgpu_virt_release_full_gpu(adev, false);
4211 
4212 	return 0;
4213 }
4214 
4215 /**
4216  * amdgpu_device_resume - initiate device resume
4217  *
4218  * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
4220  *
4221  * Bring the hw back to operating state (all asics).
4222  * Returns 0 for success or an error on failure.
4223  * Called at driver resume.
4224  */
4225 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4226 {
4227 	struct amdgpu_device *adev = drm_to_adev(dev);
4228 	int r = 0;
4229 
4230 	if (amdgpu_sriov_vf(adev)) {
4231 		r = amdgpu_virt_request_full_gpu(adev, true);
4232 		if (r)
4233 			return r;
4234 	}
4235 
4236 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4237 		return 0;
4238 
4239 	if (adev->in_s0ix)
4240 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4241 
4242 	/* post card */
4243 	if (amdgpu_device_need_post(adev)) {
4244 		r = amdgpu_device_asic_init(adev);
4245 		if (r)
4246 			dev_err(adev->dev, "amdgpu asic init failed\n");
4247 	}
4248 
4249 	r = amdgpu_device_ip_resume(adev);
4250 
4251 	if (r) {
4252 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4253 		goto exit;
4254 	}
4255 	amdgpu_fence_driver_hw_init(adev);
4256 
4257 	r = amdgpu_device_ip_late_init(adev);
4258 	if (r)
4259 		goto exit;
4260 
4261 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4262 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4263 
4264 	if (!adev->in_s0ix) {
4265 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4266 		if (r)
4267 			goto exit;
4268 	}
4269 
4270 exit:
4271 	if (amdgpu_sriov_vf(adev)) {
4272 		amdgpu_virt_init_data_exchange(adev);
4273 		amdgpu_virt_release_full_gpu(adev, true);
4274 	}
4275 
4276 	if (r)
4277 		return r;
4278 
4279 	/* Make sure IB tests flushed */
4280 	flush_delayed_work(&adev->delayed_init_work);
4281 
4282 	if (fbcon)
4283 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4284 
4285 	amdgpu_ras_resume(adev);
4286 
4287 	if (adev->mode_info.num_crtc) {
4288 		/*
4289 		 * Most of the connector probing functions try to acquire runtime pm
4290 		 * refs to ensure that the GPU is powered on when connector polling is
4291 		 * performed. Since we're calling this from a runtime PM callback,
4292 		 * trying to acquire rpm refs will cause us to deadlock.
4293 		 *
4294 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4295 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4296 		 */
4297 #ifdef CONFIG_PM
4298 		dev->dev->power.disable_depth++;
4299 #endif
4300 		if (!adev->dc_enabled)
4301 			drm_helper_hpd_irq_event(dev);
4302 		else
4303 			drm_kms_helper_hotplug_event(dev);
4304 #ifdef CONFIG_PM
4305 		dev->dev->power.disable_depth--;
4306 #endif
4307 	}
4308 	adev->in_suspend = false;
4309 
4310 	if (adev->enable_mes)
4311 		amdgpu_mes_self_test(adev);
4312 
4313 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4314 		DRM_WARN("smart shift update failed\n");
4315 
4316 	return 0;
4317 }
4318 
4319 /**
4320  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4321  *
4322  * @adev: amdgpu_device pointer
4323  *
4324  * The list of all the hardware IPs that make up the asic is walked and
4325  * the check_soft_reset callbacks are run.  check_soft_reset determines
4326  * if the asic is still hung or not.
4327  * Returns true if any of the IPs are still in a hung state, false if not.
4328  */
4329 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4330 {
4331 	int i;
4332 	bool asic_hang = false;
4333 
4334 	if (amdgpu_sriov_vf(adev))
4335 		return true;
4336 
4337 	if (amdgpu_asic_need_full_reset(adev))
4338 		return true;
4339 
4340 	for (i = 0; i < adev->num_ip_blocks; i++) {
4341 		if (!adev->ip_blocks[i].status.valid)
4342 			continue;
4343 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4344 			adev->ip_blocks[i].status.hang =
4345 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4346 		if (adev->ip_blocks[i].status.hang) {
4347 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4348 			asic_hang = true;
4349 		}
4350 	}
4351 	return asic_hang;
4352 }
4353 
4354 /**
4355  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4356  *
4357  * @adev: amdgpu_device pointer
4358  *
4359  * The list of all the hardware IPs that make up the asic is walked and the
4360  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4361  * handles any IP specific hardware or software state changes that are
4362  * necessary for a soft reset to succeed.
4363  * Returns 0 on success, negative error code on failure.
4364  */
4365 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4366 {
4367 	int i, r = 0;
4368 
4369 	for (i = 0; i < adev->num_ip_blocks; i++) {
4370 		if (!adev->ip_blocks[i].status.valid)
4371 			continue;
4372 		if (adev->ip_blocks[i].status.hang &&
4373 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4374 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4375 			if (r)
4376 				return r;
4377 		}
4378 	}
4379 
4380 	return 0;
4381 }
4382 
4383 /**
4384  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4385  *
4386  * @adev: amdgpu_device pointer
4387  *
4388  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4389  * reset is necessary to recover.
4390  * Returns true if a full asic reset is required, false if not.
4391  */
4392 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4393 {
4394 	int i;
4395 
4396 	if (amdgpu_asic_need_full_reset(adev))
4397 		return true;
4398 
4399 	for (i = 0; i < adev->num_ip_blocks; i++) {
4400 		if (!adev->ip_blocks[i].status.valid)
4401 			continue;
4402 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4403 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4404 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4405 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4406 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4407 			if (adev->ip_blocks[i].status.hang) {
4408 				dev_info(adev->dev, "Some block need full reset!\n");
4409 				return true;
4410 			}
4411 		}
4412 	}
4413 	return false;
4414 }
4415 
4416 /**
4417  * amdgpu_device_ip_soft_reset - do a soft reset
4418  *
4419  * @adev: amdgpu_device pointer
4420  *
4421  * The list of all the hardware IPs that make up the asic is walked and the
4422  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4423  * IP specific hardware or software state changes that are necessary to soft
4424  * reset the IP.
4425  * Returns 0 on success, negative error code on failure.
4426  */
4427 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4428 {
4429 	int i, r = 0;
4430 
4431 	for (i = 0; i < adev->num_ip_blocks; i++) {
4432 		if (!adev->ip_blocks[i].status.valid)
4433 			continue;
4434 		if (adev->ip_blocks[i].status.hang &&
4435 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4436 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4437 			if (r)
4438 				return r;
4439 		}
4440 	}
4441 
4442 	return 0;
4443 }
4444 
4445 /**
4446  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4447  *
4448  * @adev: amdgpu_device pointer
4449  *
4450  * The list of all the hardware IPs that make up the asic is walked and the
4451  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4452  * handles any IP specific hardware or software state changes that are
4453  * necessary after the IP has been soft reset.
4454  * Returns 0 on success, negative error code on failure.
4455  */
4456 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4457 {
4458 	int i, r = 0;
4459 
4460 	for (i = 0; i < adev->num_ip_blocks; i++) {
4461 		if (!adev->ip_blocks[i].status.valid)
4462 			continue;
4463 		if (adev->ip_blocks[i].status.hang &&
4464 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4465 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4466 		if (r)
4467 			return r;
4468 	}
4469 
4470 	return 0;
4471 }
4472 
4473 /**
4474  * amdgpu_device_recover_vram - Recover some VRAM contents
4475  *
4476  * @adev: amdgpu_device pointer
4477  *
4478  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4479  * restore things like GPUVM page tables after a GPU reset where
4480  * the contents of VRAM might be lost.
4481  *
4482  * Returns:
4483  * 0 on success, negative error code on failure.
4484  */
4485 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4486 {
4487 	struct dma_fence *fence = NULL, *next = NULL;
4488 	struct amdgpu_bo *shadow;
4489 	struct amdgpu_bo_vm *vmbo;
4490 	long r = 1, tmo;
4491 
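	/* per-fence wait budget: 8s under SR-IOV runtime, 100ms otherwise */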
4492 	if (amdgpu_sriov_runtime(adev))
4493 		tmo = msecs_to_jiffies(8000);
4494 	else
4495 		tmo = msecs_to_jiffies(100);
4496 
4497 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4498 	mutex_lock(&adev->shadow_list_lock);
4499 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4500 		shadow = &vmbo->bo;
4501 		/* No need to recover an evicted BO */
4502 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4503 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4504 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4505 			continue;
4506 
4507 		r = amdgpu_bo_restore_shadow(shadow, &next);
4508 		if (r)
4509 			break;
4510 
4511 		if (fence) {
4512 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4513 			dma_fence_put(fence);
4514 			fence = next;
4515 			if (tmo == 0) {
4516 				r = -ETIMEDOUT;
4517 				break;
4518 			} else if (tmo < 0) {
4519 				r = tmo;
4520 				break;
4521 			}
4522 		} else {
4523 			fence = next;
4524 		}
4525 	}
4526 	mutex_unlock(&adev->shadow_list_lock);
4527 
4528 	if (fence)
4529 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4530 	dma_fence_put(fence);
4531 
4532 	if (r < 0 || tmo <= 0) {
4533 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4534 		return -EIO;
4535 	}
4536 
4537 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4538 	return 0;
4539 }
4540 
4541 
4542 /**
4543  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4544  *
4545  * @adev: amdgpu_device pointer
4546  * @from_hypervisor: request from hypervisor
4547  *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, negative error code on failure.
4550  */
4551 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4552 				     bool from_hypervisor)
4553 {
4554 	int r;
4555 	struct amdgpu_hive_info *hive = NULL;
4556 	int retry_limit = 0;
4557 
4558 retry:
4559 	amdgpu_amdkfd_pre_reset(adev);
4560 
4561 	if (from_hypervisor)
4562 		r = amdgpu_virt_request_full_gpu(adev, true);
4563 	else
4564 		r = amdgpu_virt_reset_gpu(adev);
4565 	if (r)
4566 		return r;
4567 
4568 	/* Resume IP prior to SMC */
4569 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4570 	if (r)
4571 		goto error;
4572 
4573 	amdgpu_virt_init_data_exchange(adev);
4574 
4575 	r = amdgpu_device_fw_loading(adev);
4576 	if (r)
4577 		return r;
4578 
4579 	/* now we are okay to resume SMC/CP/SDMA */
4580 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4581 	if (r)
4582 		goto error;
4583 
4584 	hive = amdgpu_get_xgmi_hive(adev);
4585 	/* Update PSP FW topology after reset */
4586 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4587 		r = amdgpu_xgmi_update_topology(hive, adev);
4588 
4589 	if (hive)
4590 		amdgpu_put_xgmi_hive(hive);
4591 
4592 	if (!r) {
4593 		amdgpu_irq_gpu_reset_resume_helper(adev);
4594 		r = amdgpu_ib_ring_tests(adev);
4595 
4596 		amdgpu_amdkfd_post_reset(adev);
4597 	}
4598 
4599 error:
4600 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4601 		amdgpu_inc_vram_lost(adev);
4602 		r = amdgpu_device_recover_vram(adev);
4603 	}
4604 	amdgpu_virt_release_full_gpu(adev, true);
4605 
4606 	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4607 		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4608 			retry_limit++;
4609 			goto retry;
		} else {
			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
		}
4612 	}
4613 
4614 	return r;
4615 }
4616 
4617 /**
4618  * amdgpu_device_has_job_running - check if there is any job in mirror list
4619  *
4620  * @adev: amdgpu_device pointer
4621  *
 * Check if any ring still has a job in its pending list.
4623  */
4624 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4625 {
4626 	int i;
4627 	struct drm_sched_job *job;
4628 
4629 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4630 		struct amdgpu_ring *ring = adev->rings[i];
4631 
4632 		if (!ring || !ring->sched.thread)
4633 			continue;
4634 
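		/* a non-empty pending list means the ring still has work in flight */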
4635 		spin_lock(&ring->sched.job_list_lock);
4636 		job = list_first_entry_or_null(&ring->sched.pending_list,
4637 					       struct drm_sched_job, list);
4638 		spin_unlock(&ring->sched.job_list_lock);
4639 		if (job)
4640 			return true;
4641 	}
4642 	return false;
4643 }
4644 
4645 /**
4646  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4647  *
4648  * @adev: amdgpu_device pointer
4649  *
4650  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4651  * a hung GPU.
4652  */
4653 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4654 {
4655 
4656 	if (amdgpu_gpu_recovery == 0)
4657 		goto disabled;
4658 
4659 	/* Skip soft reset check in fatal error mode */
4660 	if (!amdgpu_ras_is_poison_mode_supported(adev))
4661 		return true;
4662 
4663 	if (amdgpu_sriov_vf(adev))
4664 		return true;
4665 
4666 	if (amdgpu_gpu_recovery == -1) {
4667 		switch (adev->asic_type) {
4668 #ifdef CONFIG_DRM_AMDGPU_SI
4669 		case CHIP_VERDE:
4670 		case CHIP_TAHITI:
4671 		case CHIP_PITCAIRN:
4672 		case CHIP_OLAND:
4673 		case CHIP_HAINAN:
4674 #endif
4675 #ifdef CONFIG_DRM_AMDGPU_CIK
4676 		case CHIP_KAVERI:
4677 		case CHIP_KABINI:
4678 		case CHIP_MULLINS:
4679 #endif
4680 		case CHIP_CARRIZO:
4681 		case CHIP_STONEY:
4682 		case CHIP_CYAN_SKILLFISH:
4683 			goto disabled;
4684 		default:
4685 			break;
4686 		}
4687 	}
4688 
4689 	return true;
4690 
disabled:
	dev_info(adev->dev, "GPU recovery disabled.\n");
	return false;
4694 }
4695 
4696 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4697 {
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);
4726 
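		/* memsize reads back as 0xffffffff while the ASIC is still in reset */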
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
	return ret;
4734 }
4735 
4736 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4737 				 struct amdgpu_reset_context *reset_context)
4738 {
4739 	int i, r = 0;
4740 	struct amdgpu_job *job = NULL;
4741 	bool need_full_reset =
4742 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4743 
4744 	if (reset_context->reset_req_dev == adev)
4745 		job = reset_context->job;
4746 
4747 	if (amdgpu_sriov_vf(adev)) {
4748 		/* stop the data exchange thread */
4749 		amdgpu_virt_fini_data_exchange(adev);
4750 	}
4751 
4752 	amdgpu_fence_driver_isr_toggle(adev, true);
4753 
4754 	/* block all schedulers and reset given job's ring */
4755 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4756 		struct amdgpu_ring *ring = adev->rings[i];
4757 
4758 		if (!ring || !ring->sched.thread)
4759 			continue;
4760 
		/*
		 * Clear the job fences from the fence drv to avoid
		 * force_completion on them; leave the NULL and vm flush
		 * fences in the fence drv.
		 */
4763 		amdgpu_fence_driver_clear_job_fences(ring);
4764 
4765 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4766 		amdgpu_fence_driver_force_completion(ring);
4767 	}
4768 
4769 	amdgpu_fence_driver_isr_toggle(adev, false);
4770 
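	/* raise the karma of the offending job so the scheduler can flag
	 * its context as guilty if this keeps happening
	 */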
4771 	if (job && job->vm)
4772 		drm_sched_increase_karma(&job->base);
4773 
4774 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4775 	/* If reset handler not implemented, continue; otherwise return */
4776 	if (r == -ENOSYS)
4777 		r = 0;
4778 	else
4779 		return r;
4780 
4781 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4782 	if (!amdgpu_sriov_vf(adev)) {
4783 
4784 		if (!need_full_reset)
4785 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4786 
4787 		if (!need_full_reset && amdgpu_gpu_recovery &&
4788 		    amdgpu_device_ip_check_soft_reset(adev)) {
4789 			amdgpu_device_ip_pre_soft_reset(adev);
4790 			r = amdgpu_device_ip_soft_reset(adev);
4791 			amdgpu_device_ip_post_soft_reset(adev);
4792 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4793 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4794 				need_full_reset = true;
4795 			}
4796 		}
4797 
		if (need_full_reset) {
			r = amdgpu_device_ip_suspend(adev);
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
		} else {
			clear_bit(AMDGPU_NEED_FULL_RESET,
				  &reset_context->flags);
		}
4805 	}
4806 
4807 	return r;
4808 }
4809 
4810 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4811 {
4812 	int i;
4813 
4814 	lockdep_assert_held(&adev->reset_domain->sem);
4815 
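	/*
	 * Snapshot the user-selected reset dump registers so their values
	 * can be included in the devcoredump taken after the reset.
	 */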
4816 	for (i = 0; i < adev->num_regs; i++) {
4817 		adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4818 		trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4819 					     adev->reset_dump_reg_value[i]);
4820 	}
4821 
4822 	return 0;
4823 }
4824 
4825 #ifdef CONFIG_DEV_COREDUMP
4826 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4827 		size_t count, void *data, size_t datalen)
4828 {
4829 	struct drm_printer p;
4830 	struct amdgpu_device *adev = data;
4831 	struct drm_print_iterator iter;
4832 	int i;
4833 
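	/* drm_coredump_printer() handles the offset/count windowing of
	 * this chunked read for us
	 */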
4834 	iter.data = buffer;
4835 	iter.offset = 0;
4836 	iter.start = offset;
4837 	iter.remain = count;
4838 
4839 	p = drm_coredump_printer(&iter);
4840 
4841 	drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4842 	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4843 	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4844 	drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4845 	if (adev->reset_task_info.pid)
4846 		drm_printf(&p, "process_name: %s PID: %d\n",
4847 			   adev->reset_task_info.process_name,
4848 			   adev->reset_task_info.pid);
4849 
4850 	if (adev->reset_vram_lost)
4851 		drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4852 	if (adev->num_regs) {
4853 		drm_printf(&p, "AMDGPU register dumps:\nOffset:     Value:\n");
4854 
4855 		for (i = 0; i < adev->num_regs; i++)
4856 			drm_printf(&p, "0x%08x: 0x%08x\n",
4857 				   adev->reset_dump_reg_list[i],
4858 				   adev->reset_dump_reg_value[i]);
4859 	}
4860 
4861 	return count - iter.remain;
4862 }
4863 
4864 static void amdgpu_devcoredump_free(void *data)
4865 {
4866 }
4867 
4868 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4869 {
4870 	struct drm_device *dev = adev_to_drm(adev);
4871 
4872 	ktime_get_ts64(&adev->reset_time);
4873 	dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
4874 		      amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4875 }
4876 #endif
4877 
4878 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4879 			 struct amdgpu_reset_context *reset_context)
4880 {
4881 	struct amdgpu_device *tmp_adev = NULL;
4882 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4883 	int r = 0;
	bool gpu_reset_for_dev_remove = false;
4885 
4886 	/* Try reset handler method first */
4887 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4888 				    reset_list);
4889 	amdgpu_reset_reg_dumps(tmp_adev);
4890 
4891 	reset_context->reset_device_list = device_list_handle;
4892 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4893 	/* If reset handler not implemented, continue; otherwise return */
4894 	if (r == -ENOSYS)
4895 		r = 0;
4896 	else
4897 		return r;
4898 
4899 	/* Reset handler not implemented, use the default method */
4900 	need_full_reset =
4901 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4902 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4903 
4904 	gpu_reset_for_dev_remove =
4905 		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4906 			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4907 
4908 	/*
4909 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4910 	 * to allow proper links negotiation in FW (within 1 sec)
4911 	 */
4912 	if (!skip_hw_reset && need_full_reset) {
4913 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4914 			/* For XGMI run all resets in parallel to speed up the process */
4915 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4916 				tmp_adev->gmc.xgmi.pending_reset = false;
4917 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4918 					r = -EALREADY;
			} else {
				r = amdgpu_asic_reset(tmp_adev);
			}
4921 
4922 			if (r) {
				dev_err(tmp_adev->dev, "ASIC reset failed with error %d for drm dev %s",
					r, adev_to_drm(tmp_adev)->unique);
4925 				break;
4926 			}
4927 		}
4928 
4929 		/* For XGMI wait for all resets to complete before proceed */
4930 		if (!r) {
4931 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4932 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4933 					flush_work(&tmp_adev->xgmi_reset_work);
4934 					r = tmp_adev->asic_reset_res;
4935 					if (r)
4936 						break;
4937 				}
4938 			}
4939 		}
4940 	}
4941 
4942 	if (!r && amdgpu_ras_intr_triggered()) {
4943 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4944 			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4945 			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4946 				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4947 		}
4948 
4949 		amdgpu_ras_intr_cleared();
4950 	}
4951 
	/*
	 * Since the mode1 reset affects base ip blocks, the
	 * phase1 ip blocks need to be resumed. Otherwise there
	 * will be a BIOS signature error and the psp bootloader
	 * can't load kdb on the next driver load.
	 */
4957 	if (gpu_reset_for_dev_remove) {
4958 		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4959 			amdgpu_device_ip_resume_phase1(tmp_adev);
4960 
4961 		goto end;
4962 	}
4963 
4964 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4965 		if (need_full_reset) {
4966 			/* post card */
4967 			r = amdgpu_device_asic_init(tmp_adev);
4968 			if (r) {
4969 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4970 			} else {
4971 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4972 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4973 				if (r)
4974 					goto out;
4975 
4976 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4977 				if (r)
4978 					goto out;
4979 
4980 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4981 #ifdef CONFIG_DEV_COREDUMP
4982 				tmp_adev->reset_vram_lost = vram_lost;
4983 				memset(&tmp_adev->reset_task_info, 0,
4984 						sizeof(tmp_adev->reset_task_info));
4985 				if (reset_context->job && reset_context->job->vm)
4986 					tmp_adev->reset_task_info =
4987 						reset_context->job->vm->task_info;
4988 				amdgpu_reset_capture_coredumpm(tmp_adev);
4989 #endif
4990 				if (vram_lost) {
4991 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4992 					amdgpu_inc_vram_lost(tmp_adev);
4993 				}
4994 
4995 				r = amdgpu_device_fw_loading(tmp_adev);
4996 				if (r)
4997 					return r;
4998 
4999 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5000 				if (r)
5001 					goto out;
5002 
5003 				if (vram_lost)
5004 					amdgpu_device_fill_reset_magic(tmp_adev);
5005 
5006 				/*
5007 				 * Add this ASIC as tracked as reset was already
5008 				 * complete successfully.
5009 				 */
5010 				amdgpu_register_gpu_instance(tmp_adev);
5011 
5012 				if (!reset_context->hive &&
5013 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5014 					amdgpu_xgmi_add_device(tmp_adev);
5015 
5016 				r = amdgpu_device_ip_late_init(tmp_adev);
5017 				if (r)
5018 					goto out;
5019 
5020 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5021 
5022 				/*
5023 				 * The GPU enters bad state once faulty pages
5024 				 * by ECC has reached the threshold, and ras
5025 				 * recovery is scheduled next. So add one check
5026 				 * here to break recovery if it indeed exceeds
5027 				 * bad page threshold, and remind user to
5028 				 * retire this GPU or setting one bigger
5029 				 * bad_page_threshold value to fix this once
5030 				 * probing driver again.
5031 				 */
5032 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5033 					/* must succeed. */
5034 					amdgpu_ras_resume(tmp_adev);
5035 				} else {
5036 					r = -EINVAL;
5037 					goto out;
5038 				}
5039 
5040 				/* Update PSP FW topology after reset */
5041 				if (reset_context->hive &&
5042 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5043 					r = amdgpu_xgmi_update_topology(
5044 						reset_context->hive, tmp_adev);
5045 			}
5046 		}
5047 
5048 out:
5049 		if (!r) {
5050 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5051 			r = amdgpu_ib_ring_tests(tmp_adev);
5052 			if (r) {
5053 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5054 				need_full_reset = true;
5055 				r = -EAGAIN;
5056 				goto end;
5057 			}
5058 		}
5059 
5060 		if (!r)
5061 			r = amdgpu_device_recover_vram(tmp_adev);
5062 		else
5063 			tmp_adev->asic_reset_res = r;
5064 	}
5065 
5066 end:
5067 	if (need_full_reset)
5068 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5069 	else
5070 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5071 	return r;
5072 }
5073 
5074 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5075 {
5076 
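	/* tell the SMU (MP1) which kind of reset is coming so the firmware
	 * can enter the matching state
	 */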
5077 	switch (amdgpu_asic_reset_method(adev)) {
5078 	case AMD_RESET_METHOD_MODE1:
5079 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5080 		break;
5081 	case AMD_RESET_METHOD_MODE2:
5082 		adev->mp1_state = PP_MP1_STATE_RESET;
5083 		break;
5084 	default:
5085 		adev->mp1_state = PP_MP1_STATE_NONE;
5086 		break;
5087 	}
5088 }
5089 
5090 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5091 {
5092 	amdgpu_vf_error_trans_all(adev);
5093 	adev->mp1_state = PP_MP1_STATE_NONE;
5094 }
5095 
5096 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5097 {
5098 	struct pci_dev *p = NULL;
5099 
5100 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5101 			adev->pdev->bus->number, 1);
5102 	if (p) {
5103 		pm_runtime_enable(&(p->dev));
5104 		pm_runtime_resume(&(p->dev));
5105 	}
5106 
5107 	pci_dev_put(p);
5108 }
5109 
5110 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5111 {
5112 	enum amd_reset_method reset_method;
5113 	struct pci_dev *p = NULL;
5114 	u64 expires;
5115 
5116 	/*
5117 	 * For now, only BACO and mode1 reset are confirmed
5118 	 * to suffer the audio issue without proper suspended.
5119 	 */
5120 	reset_method = amdgpu_asic_reset_method(adev);
5121 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5122 	     (reset_method != AMD_RESET_METHOD_MODE1))
5123 		return -EINVAL;
5124 
5125 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5126 			adev->pdev->bus->number, 1);
5127 	if (!p)
5128 		return -ENODEV;
5129 
5130 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5131 	if (!expires)
5132 		/*
5133 		 * If we cannot get the audio device autosuspend delay,
5134 		 * a fixed 4S interval will be used. Considering 3S is
5135 		 * the audio controller default autosuspend delay setting.
5136 		 * 4S used here is guaranteed to cover that.
5137 		 */
5138 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5139 
5140 	while (!pm_runtime_status_suspended(&(p->dev))) {
5141 		if (!pm_runtime_suspend(&(p->dev)))
5142 			break;
5143 
5144 		if (expires < ktime_get_mono_fast_ns()) {
5145 			dev_warn(adev->dev, "failed to suspend display audio\n");
5146 			pci_dev_put(p);
5147 			/* TODO: abort the succeeding gpu reset? */
5148 			return -ETIMEDOUT;
5149 		}
5150 	}
5151 
5152 	pm_runtime_disable(&(p->dev));
5153 
5154 	pci_dev_put(p);
5155 	return 0;
5156 }
5157 
5158 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5159 {
5160 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5161 
5162 #if defined(CONFIG_DEBUG_FS)
5163 	if (!amdgpu_sriov_vf(adev))
5164 		cancel_work(&adev->reset_work);
5165 #endif
5166 
5167 	if (adev->kfd.dev)
5168 		cancel_work(&adev->kfd.reset_work);
5169 
5170 	if (amdgpu_sriov_vf(adev))
5171 		cancel_work(&adev->virt.flr_work);
5172 
5173 	if (con && adev->ras_enabled)
		cancel_work(&con->recovery_work);
}
5177 
5178 /**
5179  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5180  *
5181  * @adev: amdgpu_device pointer
5182  * @job: which job trigger hang
5183  * @reset_context: amdgpu reset context pointer
5184  *
5185  * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
5187  * Returns 0 for success or an error on failure.
5188  */
5189 
5190 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5191 			      struct amdgpu_job *job,
5192 			      struct amdgpu_reset_context *reset_context)
5193 {
5194 	struct list_head device_list, *device_list_handle =  NULL;
5195 	bool job_signaled = false;
5196 	struct amdgpu_hive_info *hive = NULL;
5197 	struct amdgpu_device *tmp_adev = NULL;
5198 	int i, r = 0;
5199 	bool need_emergency_restart = false;
5200 	bool audio_suspended = false;
5201 	bool gpu_reset_for_dev_remove = false;
5202 
5203 	gpu_reset_for_dev_remove =
5204 			test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5205 				test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5206 
5207 	/*
5208 	 * Special case: RAS triggered and full reset isn't supported
5209 	 */
5210 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5211 
5212 	/*
5213 	 * Flush RAM to disk so that after reboot
5214 	 * the user can read log and see why the system rebooted.
5215 	 */
5216 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5217 		DRM_WARN("Emergency reboot.");
5218 
5219 		ksys_sync_helper();
5220 		emergency_restart();
5221 	}
5222 
5223 	dev_info(adev->dev, "GPU %s begin!\n",
5224 		need_emergency_restart ? "jobs stop":"reset");
5225 
5226 	if (!amdgpu_sriov_vf(adev))
5227 		hive = amdgpu_get_xgmi_hive(adev);
5228 	if (hive)
5229 		mutex_lock(&hive->hive_lock);
5230 
5231 	reset_context->job = job;
5232 	reset_context->hive = hive;
5233 	/*
5234 	 * Build list of devices to reset.
5235 	 * In case we are in XGMI hive mode, resort the device list
5236 	 * to put adev in the 1st position.
5237 	 */
5238 	INIT_LIST_HEAD(&device_list);
5239 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5240 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5241 			list_add_tail(&tmp_adev->reset_list, &device_list);
5242 			if (gpu_reset_for_dev_remove && adev->shutdown)
5243 				tmp_adev->shutdown = true;
5244 		}
5245 		if (!list_is_first(&adev->reset_list, &device_list))
5246 			list_rotate_to_front(&adev->reset_list, &device_list);
5247 		device_list_handle = &device_list;
5248 	} else {
5249 		list_add_tail(&adev->reset_list, &device_list);
5250 		device_list_handle = &device_list;
5251 	}
5252 
5253 	/* We need to lock reset domain only once both for XGMI and single device */
5254 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5255 				    reset_list);
5256 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5257 
5258 	/* block all schedulers and reset given job's ring */
5259 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5260 
5261 		amdgpu_device_set_mp1_state(tmp_adev);
5262 
5263 		/*
5264 		 * Try to put the audio codec into suspend state
5265 		 * before gpu reset started.
5266 		 *
5267 		 * Due to the power domain of the graphics device
5268 		 * is shared with AZ power domain. Without this,
5269 		 * we may change the audio hardware from behind
5270 		 * the audio driver's back. That will trigger
5271 		 * some audio codec errors.
5272 		 */
5273 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5274 			audio_suspended = true;
5275 
5276 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5277 
5278 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5279 
5280 		if (!amdgpu_sriov_vf(tmp_adev))
5281 			amdgpu_amdkfd_pre_reset(tmp_adev);
5282 
5283 		/*
5284 		 * Mark these ASICs to be reseted as untracked first
5285 		 * And add them back after reset completed
5286 		 */
5287 		amdgpu_unregister_gpu_instance(tmp_adev);
5288 
5289 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5290 
5291 		/* disable ras on ALL IPs */
5292 		if (!need_emergency_restart &&
5293 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5294 			amdgpu_ras_suspend(tmp_adev);
5295 
5296 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5297 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5298 
5299 			if (!ring || !ring->sched.thread)
5300 				continue;
5301 
5302 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5303 
5304 			if (need_emergency_restart)
5305 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5306 		}
5307 		atomic_inc(&tmp_adev->gpu_reset_counter);
5308 	}
5309 
5310 	if (need_emergency_restart)
5311 		goto skip_sched_resume;
5312 
5313 	/*
5314 	 * Must check guilty signal here since after this point all old
5315 	 * HW fences are force signaled.
5316 	 *
5317 	 * job->base holds a reference to parent fence
5318 	 */
5319 	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5320 		job_signaled = true;
5321 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5322 		goto skip_hw_reset;
5323 	}
5324 
retry:	/* Pre-ASIC reset the rest of the adevs in the XGMI hive. */
5326 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5327 		if (gpu_reset_for_dev_remove) {
			/* Workaround for ASICs that need to disable the SMC first */
5329 			amdgpu_device_smu_fini_early(tmp_adev);
5330 		}
5331 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
		/* TODO: Should we stop here on error? */
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
				  r, adev_to_drm(tmp_adev)->unique);
5336 			tmp_adev->asic_reset_res = r;
5337 		}
5338 
5339 		/*
5340 		 * Drop all pending non scheduler resets. Scheduler resets
5341 		 * were already dropped during drm_sched_stop
5342 		 */
5343 		amdgpu_device_stop_pending_resets(tmp_adev);
5344 	}
5345 
	/* Actual ASIC resets if needed. */
5347 	/* Host driver will handle XGMI hive reset for SRIOV */
5348 	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, !job);
5350 		if (r)
5351 			adev->asic_reset_res = r;
5352 
		/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so we need to resume RAS during reset */
5354 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
5355 		    adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
5356 			amdgpu_ras_resume(adev);
5357 	} else {
5358 		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
		if (r == -EAGAIN)
5360 			goto retry;
5361 
5362 		if (!r && gpu_reset_for_dev_remove)
5363 			goto recover_end;
5364 	}
5365 
5366 skip_hw_reset:
5367 
	/* Post ASIC reset for all devs. */
5369 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5370 
5371 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5372 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5373 
5374 			if (!ring || !ring->sched.thread)
5375 				continue;
5376 
5377 			drm_sched_start(&ring->sched, true);
5378 		}
5379 
		if (tmp_adev->enable_mes &&
		    tmp_adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
			amdgpu_mes_self_test(tmp_adev);
5382 
		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5386 
5387 		if (tmp_adev->asic_reset_res)
5388 			r = tmp_adev->asic_reset_res;
5389 
5390 		tmp_adev->asic_reset_res = 0;
5391 
5392 		if (r) {
			/* bad news, how do we tell this to userspace? */
5394 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5395 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5396 		} else {
5397 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5398 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5399 				DRM_WARN("smart shift update failed\n");
5400 		}
5401 	}
5402 
5403 skip_sched_resume:
5404 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5405 		/* unlock kfd: SRIOV would do it separately */
5406 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5407 			amdgpu_amdkfd_post_reset(tmp_adev);
5408 
		/* kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring up kfd here if it was not
		 * initialized before.
		 */
		if (!tmp_adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(tmp_adev);
5414 
5415 		if (audio_suspended)
5416 			amdgpu_device_resume_display_audio(tmp_adev);
5417 
5418 		amdgpu_device_unset_mp1_state(tmp_adev);
5419 
5420 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5421 	}
5422 
5423 recover_end:
	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
				    reset_list);
5426 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5427 
5428 	if (hive) {
5429 		mutex_unlock(&hive->hive_lock);
5430 		amdgpu_put_xgmi_hive(hive);
5431 	}
5432 
5433 	if (r)
5434 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5435 
5436 	atomic_set(&adev->reset_domain->reset_res, r);
5437 	return r;
5438 }
5439 
5440 /**
 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIe capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIe config space may not be available.
5448  */
5449 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5450 {
5451 	struct pci_dev *pdev;
5452 	enum pci_bus_speed speed_cap, platform_speed_cap;
5453 	enum pcie_link_width platform_link_width;
5454 
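	/* Module parameter overrides take precedence over the detected caps */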
5455 	if (amdgpu_pcie_gen_cap)
5456 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5457 
5458 	if (amdgpu_pcie_lane_cap)
5459 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5460 
5461 	/* covers APUs as well */
5462 	if (pci_is_root_bus(adev->pdev->bus)) {
5463 		if (adev->pm.pcie_gen_mask == 0)
5464 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5465 		if (adev->pm.pcie_mlw_mask == 0)
5466 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5467 		return;
5468 	}
5469 
5470 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5471 		return;
5472 
5473 	pcie_bandwidth_available(adev->pdev, NULL,
5474 				 &platform_speed_cap, &platform_link_width);
5475 
5476 	if (adev->pm.pcie_gen_mask == 0) {
5477 		/* asic caps */
5478 		pdev = adev->pdev;
5479 		speed_cap = pcie_get_speed_cap(pdev);
5480 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5481 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5482 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5483 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5484 		} else {
5485 			if (speed_cap == PCIE_SPEED_32_0GT)
5486 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5487 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5488 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5489 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5490 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5491 			else if (speed_cap == PCIE_SPEED_16_0GT)
5492 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5493 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5494 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5495 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5496 			else if (speed_cap == PCIE_SPEED_8_0GT)
5497 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5498 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5499 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5500 			else if (speed_cap == PCIE_SPEED_5_0GT)
5501 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5502 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5503 			else
5504 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5505 		}
5506 		/* platform caps */
5507 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5508 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5509 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5510 		} else {
5511 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5512 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5513 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5514 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5515 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5516 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5517 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5518 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5519 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5520 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5521 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5522 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5523 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5524 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5525 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5526 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5527 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5528 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5529 			else
5530 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5531 
5532 		}
5533 	}
5534 	if (adev->pm.pcie_mlw_mask == 0) {
5535 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5536 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5537 		} else {
5538 			switch (platform_link_width) {
5539 			case PCIE_LNK_X32:
5540 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5541 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5542 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5543 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5544 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5545 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5546 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5547 				break;
5548 			case PCIE_LNK_X16:
5549 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5550 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5551 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5552 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5553 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5554 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5555 				break;
5556 			case PCIE_LNK_X12:
5557 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5558 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5559 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5560 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5561 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5562 				break;
5563 			case PCIE_LNK_X8:
5564 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5565 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5566 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5567 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5568 				break;
5569 			case PCIE_LNK_X4:
5570 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5571 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5572 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5573 				break;
5574 			case PCIE_LNK_X2:
5575 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5576 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5577 				break;
5578 			case PCIE_LNK_X1:
5579 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5580 				break;
5581 			default:
5582 				break;
5583 			}
5584 		}
5585 	}
5586 }
5587 
5588 /**
5589  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5590  *
5591  * @adev: amdgpu_device pointer
5592  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5593  *
5594  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5595  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5596  * @peer_adev.
5597  */
5598 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5599 				      struct amdgpu_device *peer_adev)
5600 {
5601 #ifdef CONFIG_HSA_AMD_P2P
5602 	uint64_t address_mask = peer_adev->dev->dma_mask ?
5603 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5604 	resource_size_t aper_limit =
5605 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5606 	bool p2p_access =
5607 		!adev->gmc.xgmi.connected_to_cpu &&
5608 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5609 
5610 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5611 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5612 		!(adev->gmc.aper_base & address_mask ||
5613 		  aper_limit & address_mask));
5614 #else
5615 	return false;
5616 #endif
5617 }
5618 
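/**
 * amdgpu_device_baco_enter - enter the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Mask the RAS doorbell interrupt if RAS is enabled, then ask the DPM
 * layer to put the device into BACO.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO,
 * or a negative error code from the DPM layer.
 */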
5619 int amdgpu_device_baco_enter(struct drm_device *dev)
5620 {
5621 	struct amdgpu_device *adev = drm_to_adev(dev);
5622 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5623 
5624 	if (!amdgpu_device_supports_baco(dev))
5625 		return -ENOTSUPP;
5626 
5627 	if (ras && adev->ras_enabled &&
5628 	    adev->nbio.funcs->enable_doorbell_interrupt)
5629 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5630 
5631 	return amdgpu_dpm_baco_enter(adev);
5632 }
5633 
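/**
 * amdgpu_device_baco_exit - exit the BACO (Bus Active, Chip Off) state
 *
 * @dev: drm_device pointer
 *
 * Bring the device back out of BACO via the DPM layer, then re-enable
 * the RAS doorbell interrupt that was masked on entry and, in
 * passthrough mode, clear any stale doorbell interrupt state.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO,
 * or a negative error code from the DPM layer.
 */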
5634 int amdgpu_device_baco_exit(struct drm_device *dev)
5635 {
5636 	struct amdgpu_device *adev = drm_to_adev(dev);
5637 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5638 	int ret = 0;
5639 
5640 	if (!amdgpu_device_supports_baco(dev))
5641 		return -ENOTSUPP;
5642 
5643 	ret = amdgpu_dpm_baco_exit(adev);
5644 	if (ret)
5645 		return ret;
5646 
5647 	if (ras && adev->ras_enabled &&
5648 	    adev->nbio.funcs->enable_doorbell_interrupt)
5649 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5650 
5651 	if (amdgpu_passthrough(adev) &&
5652 	    adev->nbio.funcs->clear_doorbell_interrupt)
5653 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5654 
5655 	return 0;
5656 }
5657 
5658 /**
5659  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5660  * @pdev: PCI device struct
5661  * @state: PCI channel state
5662  *
5663  * Description: Called when a PCI error is detected.
5664  *
5665  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5666  */
5667 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5668 {
5669 	struct drm_device *dev = pci_get_drvdata(pdev);
5670 	struct amdgpu_device *adev = drm_to_adev(dev);
5671 	int i;
5672 
5673 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5674 
5675 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5676 		DRM_WARN("No support for XGMI hive yet...");
5677 		return PCI_ERS_RESULT_DISCONNECT;
5678 	}
5679 
5680 	adev->pci_channel_state = state;
5681 
5682 	switch (state) {
5683 	case pci_channel_io_normal:
5684 		return PCI_ERS_RESULT_CAN_RECOVER;
5685 	/* Fatal error, prepare for slot reset */
5686 	case pci_channel_io_frozen:
5687 		/*
5688 		 * Locking adev->reset_domain->sem will prevent any external access
5689 		 * to GPU during PCI error recovery
5690 		 */
5691 		amdgpu_device_lock_reset_domain(adev->reset_domain);
5692 		amdgpu_device_set_mp1_state(adev);
5693 
5694 		/*
5695 		 * Block any work scheduling as we do for regular GPU reset
5696 		 * for the duration of the recovery
5697 		 */
5698 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5699 			struct amdgpu_ring *ring = adev->rings[i];
5700 
5701 			if (!ring || !ring->sched.thread)
5702 				continue;
5703 
5704 			drm_sched_stop(&ring->sched, NULL);
5705 		}
5706 		atomic_inc(&adev->gpu_reset_counter);
5707 		return PCI_ERS_RESULT_NEED_RESET;
5708 	case pci_channel_io_perm_failure:
5709 		/* Permanent error, prepare for device removal */
5710 		return PCI_ERS_RESULT_DISCONNECT;
5711 	}
5712 
5713 	return PCI_ERS_RESULT_NEED_RESET;
5714 }
5715 
5716 /**
5717  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5718  * @pdev: pointer to PCI device
5719  */
5720 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5721 {
5722 
5723 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5724 
5725 	/* TODO - dump whatever for debugging purposes */
5726 
	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, so there is no need to reset the slot.
	 */
5731 
5732 	return PCI_ERS_RESULT_RECOVERED;
5733 }
5734 
5735 /**
5736  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5737  * @pdev: PCI device struct
5738  *
5739  * Description: This routine is called by the pci error recovery
5740  * code after the PCI slot has been reset, just before we
5741  * should resume normal operations.
5742  */
5743 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5744 {
5745 	struct drm_device *dev = pci_get_drvdata(pdev);
5746 	struct amdgpu_device *adev = drm_to_adev(dev);
5747 	int r, i;
5748 	struct amdgpu_reset_context reset_context;
5749 	u32 memsize;
5750 	struct list_head device_list;
5751 
5752 	DRM_INFO("PCI error: slot reset callback!!\n");
5753 
5754 	memset(&reset_context, 0, sizeof(reset_context));
5755 
5756 	INIT_LIST_HEAD(&device_list);
5757 	list_add_tail(&adev->reset_list, &device_list);
5758 
5759 	/* wait for asic to come out of reset */
5760 	msleep(500);
5761 
	/* Restore PCI config space */
5763 	amdgpu_device_load_pci_state(pdev);
5764 
	/* confirm the ASIC came out of reset */
5766 	for (i = 0; i < adev->usec_timeout; i++) {
5767 		memsize = amdgpu_asic_get_config_memsize(adev);
5768 
5769 		if (memsize != 0xffffffff)
5770 			break;
5771 		udelay(1);
5772 	}
5773 	if (memsize == 0xffffffff) {
5774 		r = -ETIME;
5775 		goto out;
5776 	}
5777 
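	/*
	 * The PCI core has already reset the slot; ask the reset path to
	 * redo only the software side and skip another HW reset.
	 */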
5778 	reset_context.method = AMD_RESET_METHOD_NONE;
5779 	reset_context.reset_req_dev = adev;
5780 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5781 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5782 
5783 	adev->no_hw_access = true;
5784 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5785 	adev->no_hw_access = false;
5786 	if (r)
5787 		goto out;
5788 
5789 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5790 
5791 out:
5792 	if (!r) {
5793 		if (amdgpu_device_cache_pci_state(adev->pdev))
5794 			pci_restore_state(adev->pdev);
5795 
5796 		DRM_INFO("PCIe error recovery succeeded\n");
5797 	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5799 		amdgpu_device_unset_mp1_state(adev);
5800 		amdgpu_device_unlock_reset_domain(adev->reset_domain);
5801 	}
5802 
5803 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5804 }
5805 
5806 /**
5807  * amdgpu_pci_resume() - resume normal ops after PCI reset
5808  * @pdev: pointer to PCI device
5809  *
 * Called when the error recovery driver tells us that it's OK to
 * resume normal operation.
5812  */
5813 void amdgpu_pci_resume(struct pci_dev *pdev)
5814 {
5815 	struct drm_device *dev = pci_get_drvdata(pdev);
5816 	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");
5821 
5822 	/* Only continue execution for the case of pci_channel_io_frozen */
5823 	if (adev->pci_channel_state != pci_channel_io_frozen)
5824 		return;
5825 
5826 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5827 		struct amdgpu_ring *ring = adev->rings[i];
5828 
5829 		if (!ring || !ring->sched.thread)
5830 			continue;
5831 
5832 		drm_sched_start(&ring->sched, true);
5833 	}
5834 
5835 	amdgpu_device_unset_mp1_state(adev);
5836 	amdgpu_device_unlock_reset_domain(adev->reset_domain);
5837 }
5838 
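/**
 * amdgpu_device_cache_pci_state - save the PCI config space for later restore
 *
 * @pdev: PCI device struct
 *
 * Snapshot the current PCI configuration space into adev->pci_state so
 * that amdgpu_device_load_pci_state() can restore it after a reset.
 *
 * Return: true on success, false if the state could not be saved or stored.
 */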
5839 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5840 {
5841 	struct drm_device *dev = pci_get_drvdata(pdev);
5842 	struct amdgpu_device *adev = drm_to_adev(dev);
5843 	int r;
5844 
5845 	r = pci_save_state(pdev);
5846 	if (!r) {
5847 		kfree(adev->pci_state);
5848 
5849 		adev->pci_state = pci_store_saved_state(pdev);
5850 
5851 		if (!adev->pci_state) {
5852 			DRM_ERROR("Failed to store PCI saved state");
5853 			return false;
5854 		}
5855 	} else {
5856 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5857 		return false;
5858 	}
5859 
5860 	return true;
5861 }
5862 
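/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 *
 * @pdev: PCI device struct
 *
 * Reload the configuration space snapshot taken by
 * amdgpu_device_cache_pci_state() and apply it to the device.
 *
 * Return: true on success, false if no snapshot exists or loading failed.
 */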
5863 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5864 {
5865 	struct drm_device *dev = pci_get_drvdata(pdev);
5866 	struct amdgpu_device *adev = drm_to_adev(dev);
5867 	int r;
5868 
5869 	if (!adev->pci_state)
5870 		return false;
5871 
5872 	r = pci_load_saved_state(pdev, adev->pci_state);
5873 
5874 	if (!r) {
5875 		pci_restore_state(pdev);
5876 	} else {
5877 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5878 		return false;
5879 	}
5880 
5881 	return true;
5882 }
5883 
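/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: optional ring on which to emit the flush
 *
 * Make CPU writes to VRAM visible to the GPU. The flush is a no-op on
 * bare-metal APUs and on devices whose memory is connected to the CPU
 * over XGMI, since no HDP cache sits in the path there. If a ring that
 * can emit an HDP flush is given, the flush is emitted on that ring;
 * otherwise the ASIC-level flush callback is used.
 */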
5884 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5885 		struct amdgpu_ring *ring)
5886 {
5887 #ifdef CONFIG_X86_64
5888 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5889 		return;
5890 #endif
5891 	if (adev->gmc.xgmi.connected_to_cpu)
5892 		return;
5893 
5894 	if (ring && ring->funcs->emit_hdp_flush)
5895 		amdgpu_ring_emit_hdp_flush(ring);
5896 	else
5897 		amdgpu_asic_flush_hdp(adev, ring);
5898 }
5899 
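/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 *
 * @adev: amdgpu_device pointer
 * @ring: ring on whose behalf the invalidation is performed
 *
 * Make GPU writes to VRAM visible to subsequent CPU reads. Skipped in
 * the same cases as amdgpu_device_flush_hdp(), where no HDP cache is
 * in the path.
 */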
5900 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5901 		struct amdgpu_ring *ring)
5902 {
5903 #ifdef CONFIG_X86_64
5904 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5905 		return;
5906 #endif
5907 	if (adev->gmc.xgmi.connected_to_cpu)
5908 		return;
5909 
5910 	amdgpu_asic_invalidate_hdp(adev, ring);
5911 }
5912 
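/**
 * amdgpu_in_reset - check whether the device is currently in reset
 *
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero while a reset of the device's reset domain is in
 * progress, zero otherwise.
 */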
5913 int amdgpu_in_reset(struct amdgpu_device *adev)
5914 {
5915 	return atomic_read(&adev->reset_domain->in_gpu_reset);
5916 }
5917 
5918 /**
5919  * amdgpu_device_halt() - bring hardware to some kind of halt state
5920  *
5921  * @adev: amdgpu_device pointer
5922  *
5923  * Bring hardware to some kind of halt state so that no one can touch it
5924  * any more. It will help to maintain error context when error occurred.
5925  * Compare to a simple hang, the system will keep stable at least for SSH
5926  * access. Then it should be trivial to inspect the hardware state and
5927  * see what's going on. Implemented as following:
5928  *
5929  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5930  *    clears all CPU mappings to device, disallows remappings through page faults
5931  * 2. amdgpu_irq_disable_all() disables all interrupts
5932  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5933  * 4. set adev->no_hw_access to avoid potential crashes after setp 5
5934  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5935  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5936  *    flush any in flight DMA operations
5937  */
5938 void amdgpu_device_halt(struct amdgpu_device *adev)
5939 {
5940 	struct pci_dev *pdev = adev->pdev;
5941 	struct drm_device *ddev = adev_to_drm(adev);
5942 
5943 	drm_dev_unplug(ddev);
5944 
5945 	amdgpu_irq_disable_all(adev);
5946 
5947 	amdgpu_fence_driver_hw_fini(adev);
5948 
5949 	adev->no_hw_access = true;
5950 
5951 	amdgpu_device_unmap_mmio(adev);
5952 
5953 	pci_disable_device(pdev);
5954 	pci_wait_for_pending_transaction(pdev);
5955 }
5956 
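/**
 * amdgpu_device_pcie_port_rreg - read an indirect PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 *
 * Go through the NBIO index/data pair under the PCIe index spinlock.
 * The index write is read back to make sure it has posted before the
 * data register is sampled.
 *
 * Return: the 32-bit register value.
 */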
5957 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5958 				u32 reg)
5959 {
5960 	unsigned long flags, address, data;
5961 	u32 r;
5962 
5963 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5964 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5965 
5966 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5967 	WREG32(address, reg * 4);
5968 	(void)RREG32(address);
5969 	r = RREG32(data);
5970 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5971 	return r;
5972 }
5973 
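/**
 * amdgpu_device_pcie_port_wreg - write an indirect PCIe port register
 *
 * @adev: amdgpu_device pointer
 * @reg: register dword offset
 * @v: value to write
 *
 * Go through the NBIO index/data pair under the PCIe index spinlock,
 * reading back both writes so they post before the lock is dropped.
 */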
5974 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5975 				u32 reg, u32 v)
5976 {
5977 	unsigned long flags, address, data;
5978 
5979 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5980 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5981 
5982 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5983 	WREG32(address, reg * 4);
5984 	(void)RREG32(address);
5985 	WREG32(data, v);
5986 	(void)RREG32(data);
5987 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5988 }
5989 
5990 /**
5991  * amdgpu_device_switch_gang - switch to a new gang
5992  * @adev: amdgpu_device pointer
5993  * @gang: the gang to switch to
5994  *
5995  * Try to switch to a new gang.
5996  * Returns: NULL if we switched to the new gang or a reference to the current
5997  * gang leader.
5998  */
5999 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6000 					    struct dma_fence *gang)
6001 {
6002 	struct dma_fence *old = NULL;
6003 
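	/*
	 * Keep trying to install @gang as the new gang leader: bail out
	 * if it is already installed, or hand the caller the current
	 * leader if it has not signaled yet, since we must not switch
	 * away from an active gang.
	 */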
6004 	do {
6005 		dma_fence_put(old);
6006 		rcu_read_lock();
6007 		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6008 		rcu_read_unlock();
6009 
6010 		if (old == gang)
6011 			break;
6012 
6013 		if (!dma_fence_is_signaled(old))
6014 			return old;
6015 
6016 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6017 			 old, gang) != old);
6018 
6019 	dma_fence_put(old);
6020 	return NULL;
6021 }
6022 
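/**
 * amdgpu_device_has_display_hardware - check for usable display hardware
 *
 * @adev: amdgpu_device pointer
 *
 * Pre-IP-discovery ASICs are matched against explicit lists; newer
 * ASICs are checked via the discovered DCE IP version and the DMU
 * harvest mask.
 *
 * Return: true if the device has display hardware, false otherwise.
 */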
6023 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6024 {
6025 	switch (adev->asic_type) {
6026 #ifdef CONFIG_DRM_AMDGPU_SI
6027 	case CHIP_HAINAN:
6028 #endif
6029 	case CHIP_TOPAZ:
6030 		/* chips with no display hardware */
6031 		return false;
6032 #ifdef CONFIG_DRM_AMDGPU_SI
6033 	case CHIP_TAHITI:
6034 	case CHIP_PITCAIRN:
6035 	case CHIP_VERDE:
6036 	case CHIP_OLAND:
6037 #endif
6038 #ifdef CONFIG_DRM_AMDGPU_CIK
6039 	case CHIP_BONAIRE:
6040 	case CHIP_HAWAII:
6041 	case CHIP_KAVERI:
6042 	case CHIP_KABINI:
6043 	case CHIP_MULLINS:
6044 #endif
6045 	case CHIP_TONGA:
6046 	case CHIP_FIJI:
6047 	case CHIP_POLARIS10:
6048 	case CHIP_POLARIS11:
6049 	case CHIP_POLARIS12:
6050 	case CHIP_VEGAM:
6051 	case CHIP_CARRIZO:
6052 	case CHIP_STONEY:
6053 		/* chips with display hardware */
6054 		return true;
6055 	default:
6056 		/* IP discovery */
6057 		if (!adev->ip_versions[DCE_HWIP][0] ||
6058 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6059 			return false;
6060 		return true;
6061 	}
6062 }
6063