xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision 1ec1944eb50c8de2d96de1188eec9f8b22d03366)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_probe_helper.h>
38 #include <drm/amdgpu_drm.h>
39 #include <linux/vgaarb.h>
40 #include <linux/vga_switcheroo.h>
41 #include <linux/efi.h>
42 #include "amdgpu.h"
43 #include "amdgpu_trace.h"
44 #include "amdgpu_i2c.h"
45 #include "atom.h"
46 #include "amdgpu_atombios.h"
47 #include "amdgpu_atomfirmware.h"
48 #include "amd_pcie.h"
49 #ifdef CONFIG_DRM_AMDGPU_SI
50 #include "si.h"
51 #endif
52 #ifdef CONFIG_DRM_AMDGPU_CIK
53 #include "cik.h"
54 #endif
55 #include "vi.h"
56 #include "soc15.h"
57 #include "nv.h"
58 #include "bif/bif_4_1_d.h"
59 #include <linux/firmware.h>
60 #include "amdgpu_vf_error.h"
61 
62 #include "amdgpu_amdkfd.h"
63 #include "amdgpu_pm.h"
64 
65 #include "amdgpu_xgmi.h"
66 #include "amdgpu_ras.h"
67 #include "amdgpu_pmu.h"
68 #include "amdgpu_fru_eeprom.h"
69 #include "amdgpu_reset.h"
70 
71 #include <linux/suspend.h>
72 #include <drm/task_barrier.h>
73 #include <linux/pm_runtime.h>
74 
75 #include <drm/drm_drv.h>
76 
77 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
88 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
89 
90 #define AMDGPU_RESUME_MS		2000
91 
92 const char *amdgpu_asic_name[] = {
93 	"TAHITI",
94 	"PITCAIRN",
95 	"VERDE",
96 	"OLAND",
97 	"HAINAN",
98 	"BONAIRE",
99 	"KAVERI",
100 	"KABINI",
101 	"HAWAII",
102 	"MULLINS",
103 	"TOPAZ",
104 	"TONGA",
105 	"FIJI",
106 	"CARRIZO",
107 	"STONEY",
108 	"POLARIS10",
109 	"POLARIS11",
110 	"POLARIS12",
111 	"VEGAM",
112 	"VEGA10",
113 	"VEGA12",
114 	"VEGA20",
115 	"RAVEN",
116 	"ARCTURUS",
117 	"RENOIR",
118 	"ALDEBARAN",
119 	"NAVI10",
120 	"CYAN_SKILLFISH",
121 	"NAVI14",
122 	"NAVI12",
123 	"SIENNA_CICHLID",
124 	"NAVY_FLOUNDER",
125 	"VANGOGH",
126 	"DIMGREY_CAVEFISH",
127 	"BEIGE_GOBY",
128 	"YELLOW_CARP",
129 	"IP DISCOVERY",
130 	"LAST",
131 };
132 
133 /**
134  * DOC: pcie_replay_count
135  *
136  * The amdgpu driver provides a sysfs API for reporting the total number
137  * of PCIe replays (NAKs)
138  * The file pcie_replay_count is used for this and returns the total
139  * number of replays as a sum of the NAKs generated and NAKs received
140  */
141 
142 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
143 		struct device_attribute *attr, char *buf)
144 {
145 	struct drm_device *ddev = dev_get_drvdata(dev);
146 	struct amdgpu_device *adev = drm_to_adev(ddev);
147 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
148 
149 	return sysfs_emit(buf, "%llu\n", cnt);
150 }
151 
152 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
153 		amdgpu_device_get_pcie_replay_count, NULL);
154 
155 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
156 
157 /**
158  * DOC: product_name
159  *
160  * The amdgpu driver provides a sysfs API for reporting the product name
161  * for the device
162  * The file serial_number is used for this and returns the product name
163  * as returned from the FRU.
164  * NOTE: This is only available for certain server cards
165  */
166 
167 static ssize_t amdgpu_device_get_product_name(struct device *dev,
168 		struct device_attribute *attr, char *buf)
169 {
170 	struct drm_device *ddev = dev_get_drvdata(dev);
171 	struct amdgpu_device *adev = drm_to_adev(ddev);
172 
173 	return sysfs_emit(buf, "%s\n", adev->product_name);
174 }
175 
176 static DEVICE_ATTR(product_name, S_IRUGO,
177 		amdgpu_device_get_product_name, NULL);
178 
179 /**
180  * DOC: product_number
181  *
182  * The amdgpu driver provides a sysfs API for reporting the part number
183  * for the device
184  * The file serial_number is used for this and returns the part number
185  * as returned from the FRU.
186  * NOTE: This is only available for certain server cards
187  */
188 
189 static ssize_t amdgpu_device_get_product_number(struct device *dev,
190 		struct device_attribute *attr, char *buf)
191 {
192 	struct drm_device *ddev = dev_get_drvdata(dev);
193 	struct amdgpu_device *adev = drm_to_adev(ddev);
194 
195 	return sysfs_emit(buf, "%s\n", adev->product_number);
196 }
197 
198 static DEVICE_ATTR(product_number, S_IRUGO,
199 		amdgpu_device_get_product_number, NULL);
200 
201 /**
202  * DOC: serial_number
203  *
204  * The amdgpu driver provides a sysfs API for reporting the serial number
205  * for the device
206  * The file serial_number is used for this and returns the serial number
207  * as returned from the FRU.
208  * NOTE: This is only available for certain server cards
209  */
210 
211 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
212 		struct device_attribute *attr, char *buf)
213 {
214 	struct drm_device *ddev = dev_get_drvdata(dev);
215 	struct amdgpu_device *adev = drm_to_adev(ddev);
216 
217 	return sysfs_emit(buf, "%s\n", adev->serial);
218 }
219 
220 static DEVICE_ATTR(serial_number, S_IRUGO,
221 		amdgpu_device_get_serial_number, NULL);
222 
223 /**
224  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
225  *
226  * @dev: drm_device pointer
227  *
228  * Returns true if the device is a dGPU with ATPX power control,
229  * otherwise return false.
230  */
231 bool amdgpu_device_supports_px(struct drm_device *dev)
232 {
233 	struct amdgpu_device *adev = drm_to_adev(dev);
234 
235 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
236 		return true;
237 	return false;
238 }
239 
240 /**
241  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
242  *
243  * @dev: drm_device pointer
244  *
245  * Returns true if the device is a dGPU with ACPI power control,
246  * otherwise return false.
247  */
248 bool amdgpu_device_supports_boco(struct drm_device *dev)
249 {
250 	struct amdgpu_device *adev = drm_to_adev(dev);
251 
252 	if (adev->has_pr3 ||
253 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
254 		return true;
255 	return false;
256 }
257 
258 /**
259  * amdgpu_device_supports_baco - Does the device support BACO
260  *
261  * @dev: drm_device pointer
262  *
263  * Returns true if the device supporte BACO,
264  * otherwise return false.
265  */
266 bool amdgpu_device_supports_baco(struct drm_device *dev)
267 {
268 	struct amdgpu_device *adev = drm_to_adev(dev);
269 
270 	return amdgpu_asic_supports_baco(adev);
271 }
272 
273 /**
274  * amdgpu_device_supports_smart_shift - Is the device dGPU with
275  * smart shift support
276  *
277  * @dev: drm_device pointer
278  *
279  * Returns true if the device is a dGPU with Smart Shift support,
280  * otherwise returns false.
281  */
282 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
283 {
284 	return (amdgpu_device_supports_boco(dev) &&
285 		amdgpu_acpi_is_power_shift_control_supported());
286 }
287 
288 /*
289  * VRAM access helper functions
290  */
291 
292 /**
293  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
294  *
295  * @adev: amdgpu_device pointer
296  * @pos: offset of the buffer in vram
297  * @buf: virtual address of the buffer in system memory
298  * @size: read/write size, sizeof(@buf) must > @size
299  * @write: true - write to vram, otherwise - read from vram
300  */
301 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
302 			     void *buf, size_t size, bool write)
303 {
304 	unsigned long flags;
305 	uint32_t hi = ~0, tmp = 0;
306 	uint32_t *data = buf;
307 	uint64_t last;
308 	int idx;
309 
310 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
311 		return;
312 
313 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
314 
315 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
316 	for (last = pos + size; pos < last; pos += 4) {
317 		tmp = pos >> 31;
318 
319 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
320 		if (tmp != hi) {
321 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
322 			hi = tmp;
323 		}
324 		if (write)
325 			WREG32_NO_KIQ(mmMM_DATA, *data++);
326 		else
327 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
328 	}
329 
330 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
331 	drm_dev_exit(idx);
332 }
333 
334 /**
335  * amdgpu_device_aper_access - access vram by vram aperature
336  *
337  * @adev: amdgpu_device pointer
338  * @pos: offset of the buffer in vram
339  * @buf: virtual address of the buffer in system memory
340  * @size: read/write size, sizeof(@buf) must > @size
341  * @write: true - write to vram, otherwise - read from vram
342  *
343  * The return value means how many bytes have been transferred.
344  */
345 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
346 				 void *buf, size_t size, bool write)
347 {
348 #ifdef CONFIG_64BIT
349 	void __iomem *addr;
350 	size_t count = 0;
351 	uint64_t last;
352 
353 	if (!adev->mman.aper_base_kaddr)
354 		return 0;
355 
356 	last = min(pos + size, adev->gmc.visible_vram_size);
357 	if (last > pos) {
358 		addr = adev->mman.aper_base_kaddr + pos;
359 		count = last - pos;
360 
361 		if (write) {
362 			memcpy_toio(addr, buf, count);
363 			mb();
364 			amdgpu_device_flush_hdp(adev, NULL);
365 		} else {
366 			amdgpu_device_invalidate_hdp(adev, NULL);
367 			mb();
368 			memcpy_fromio(buf, addr, count);
369 		}
370 
371 	}
372 
373 	return count;
374 #else
375 	return 0;
376 #endif
377 }
378 
379 /**
380  * amdgpu_device_vram_access - read/write a buffer in vram
381  *
382  * @adev: amdgpu_device pointer
383  * @pos: offset of the buffer in vram
384  * @buf: virtual address of the buffer in system memory
385  * @size: read/write size, sizeof(@buf) must > @size
386  * @write: true - write to vram, otherwise - read from vram
387  */
388 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
389 			       void *buf, size_t size, bool write)
390 {
391 	size_t count;
392 
393 	/* try to using vram apreature to access vram first */
394 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
395 	size -= count;
396 	if (size) {
397 		/* using MM to access rest vram */
398 		pos += count;
399 		buf += count;
400 		amdgpu_device_mm_access(adev, pos, buf, size, write);
401 	}
402 }
403 
404 /*
405  * register access helper functions.
406  */
407 
408 /* Check if hw access should be skipped because of hotplug or device error */
409 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
410 {
411 	if (adev->no_hw_access)
412 		return true;
413 
414 #ifdef CONFIG_LOCKDEP
415 	/*
416 	 * This is a bit complicated to understand, so worth a comment. What we assert
417 	 * here is that the GPU reset is not running on another thread in parallel.
418 	 *
419 	 * For this we trylock the read side of the reset semaphore, if that succeeds
420 	 * we know that the reset is not running in paralell.
421 	 *
422 	 * If the trylock fails we assert that we are either already holding the read
423 	 * side of the lock or are the reset thread itself and hold the write side of
424 	 * the lock.
425 	 */
426 	if (in_task()) {
427 		if (down_read_trylock(&adev->reset_sem))
428 			up_read(&adev->reset_sem);
429 		else
430 			lockdep_assert_held(&adev->reset_sem);
431 	}
432 #endif
433 	return false;
434 }
435 
436 /**
437  * amdgpu_device_rreg - read a memory mapped IO or indirect register
438  *
439  * @adev: amdgpu_device pointer
440  * @reg: dword aligned register offset
441  * @acc_flags: access flags which require special behavior
442  *
443  * Returns the 32 bit value from the offset specified.
444  */
445 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
446 			    uint32_t reg, uint32_t acc_flags)
447 {
448 	uint32_t ret;
449 
450 	if (amdgpu_device_skip_hw_access(adev))
451 		return 0;
452 
453 	if ((reg * 4) < adev->rmmio_size) {
454 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
455 		    amdgpu_sriov_runtime(adev) &&
456 		    down_read_trylock(&adev->reset_sem)) {
457 			ret = amdgpu_kiq_rreg(adev, reg);
458 			up_read(&adev->reset_sem);
459 		} else {
460 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
461 		}
462 	} else {
463 		ret = adev->pcie_rreg(adev, reg * 4);
464 	}
465 
466 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
467 
468 	return ret;
469 }
470 
471 /*
472  * MMIO register read with bytes helper functions
473  * @offset:bytes offset from MMIO start
474  *
475 */
476 
477 /**
478  * amdgpu_mm_rreg8 - read a memory mapped IO register
479  *
480  * @adev: amdgpu_device pointer
481  * @offset: byte aligned register offset
482  *
483  * Returns the 8 bit value from the offset specified.
484  */
485 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
486 {
487 	if (amdgpu_device_skip_hw_access(adev))
488 		return 0;
489 
490 	if (offset < adev->rmmio_size)
491 		return (readb(adev->rmmio + offset));
492 	BUG();
493 }
494 
495 /*
496  * MMIO register write with bytes helper functions
497  * @offset:bytes offset from MMIO start
498  * @value: the value want to be written to the register
499  *
500 */
501 /**
502  * amdgpu_mm_wreg8 - read a memory mapped IO register
503  *
504  * @adev: amdgpu_device pointer
505  * @offset: byte aligned register offset
506  * @value: 8 bit value to write
507  *
508  * Writes the value specified to the offset specified.
509  */
510 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
511 {
512 	if (amdgpu_device_skip_hw_access(adev))
513 		return;
514 
515 	if (offset < adev->rmmio_size)
516 		writeb(value, adev->rmmio + offset);
517 	else
518 		BUG();
519 }
520 
521 /**
522  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
523  *
524  * @adev: amdgpu_device pointer
525  * @reg: dword aligned register offset
526  * @v: 32 bit value to write to the register
527  * @acc_flags: access flags which require special behavior
528  *
529  * Writes the value specified to the offset specified.
530  */
531 void amdgpu_device_wreg(struct amdgpu_device *adev,
532 			uint32_t reg, uint32_t v,
533 			uint32_t acc_flags)
534 {
535 	if (amdgpu_device_skip_hw_access(adev))
536 		return;
537 
538 	if ((reg * 4) < adev->rmmio_size) {
539 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
540 		    amdgpu_sriov_runtime(adev) &&
541 		    down_read_trylock(&adev->reset_sem)) {
542 			amdgpu_kiq_wreg(adev, reg, v);
543 			up_read(&adev->reset_sem);
544 		} else {
545 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
546 		}
547 	} else {
548 		adev->pcie_wreg(adev, reg * 4, v);
549 	}
550 
551 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
552 }
553 
554 /**
555  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
556  *
557  * @adev: amdgpu_device pointer
558  * @reg: mmio/rlc register
559  * @v: value to write
560  *
561  * this function is invoked only for the debugfs register access
562  */
563 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
564 			     uint32_t reg, uint32_t v)
565 {
566 	if (amdgpu_device_skip_hw_access(adev))
567 		return;
568 
569 	if (amdgpu_sriov_fullaccess(adev) &&
570 	    adev->gfx.rlc.funcs &&
571 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
572 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
573 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
574 	} else if ((reg * 4) >= adev->rmmio_size) {
575 		adev->pcie_wreg(adev, reg * 4, v);
576 	} else {
577 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
578 	}
579 }
580 
581 /**
582  * amdgpu_mm_rdoorbell - read a doorbell dword
583  *
584  * @adev: amdgpu_device pointer
585  * @index: doorbell index
586  *
587  * Returns the value in the doorbell aperture at the
588  * requested doorbell index (CIK).
589  */
590 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
591 {
592 	if (amdgpu_device_skip_hw_access(adev))
593 		return 0;
594 
595 	if (index < adev->doorbell.num_doorbells) {
596 		return readl(adev->doorbell.ptr + index);
597 	} else {
598 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
599 		return 0;
600 	}
601 }
602 
603 /**
604  * amdgpu_mm_wdoorbell - write a doorbell dword
605  *
606  * @adev: amdgpu_device pointer
607  * @index: doorbell index
608  * @v: value to write
609  *
610  * Writes @v to the doorbell aperture at the
611  * requested doorbell index (CIK).
612  */
613 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
614 {
615 	if (amdgpu_device_skip_hw_access(adev))
616 		return;
617 
618 	if (index < adev->doorbell.num_doorbells) {
619 		writel(v, adev->doorbell.ptr + index);
620 	} else {
621 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
622 	}
623 }
624 
625 /**
626  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
627  *
628  * @adev: amdgpu_device pointer
629  * @index: doorbell index
630  *
631  * Returns the value in the doorbell aperture at the
632  * requested doorbell index (VEGA10+).
633  */
634 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
635 {
636 	if (amdgpu_device_skip_hw_access(adev))
637 		return 0;
638 
639 	if (index < adev->doorbell.num_doorbells) {
640 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
641 	} else {
642 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
643 		return 0;
644 	}
645 }
646 
647 /**
648  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
649  *
650  * @adev: amdgpu_device pointer
651  * @index: doorbell index
652  * @v: value to write
653  *
654  * Writes @v to the doorbell aperture at the
655  * requested doorbell index (VEGA10+).
656  */
657 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
658 {
659 	if (amdgpu_device_skip_hw_access(adev))
660 		return;
661 
662 	if (index < adev->doorbell.num_doorbells) {
663 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
664 	} else {
665 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
666 	}
667 }
668 
669 /**
670  * amdgpu_device_indirect_rreg - read an indirect register
671  *
672  * @adev: amdgpu_device pointer
673  * @pcie_index: mmio register offset
674  * @pcie_data: mmio register offset
675  * @reg_addr: indirect register address to read from
676  *
677  * Returns the value of indirect register @reg_addr
678  */
679 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
680 				u32 pcie_index, u32 pcie_data,
681 				u32 reg_addr)
682 {
683 	unsigned long flags;
684 	u32 r;
685 	void __iomem *pcie_index_offset;
686 	void __iomem *pcie_data_offset;
687 
688 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
689 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
690 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
691 
692 	writel(reg_addr, pcie_index_offset);
693 	readl(pcie_index_offset);
694 	r = readl(pcie_data_offset);
695 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
696 
697 	return r;
698 }
699 
700 /**
701  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
702  *
703  * @adev: amdgpu_device pointer
704  * @pcie_index: mmio register offset
705  * @pcie_data: mmio register offset
706  * @reg_addr: indirect register address to read from
707  *
708  * Returns the value of indirect register @reg_addr
709  */
710 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
711 				  u32 pcie_index, u32 pcie_data,
712 				  u32 reg_addr)
713 {
714 	unsigned long flags;
715 	u64 r;
716 	void __iomem *pcie_index_offset;
717 	void __iomem *pcie_data_offset;
718 
719 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
720 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
721 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
722 
723 	/* read low 32 bits */
724 	writel(reg_addr, pcie_index_offset);
725 	readl(pcie_index_offset);
726 	r = readl(pcie_data_offset);
727 	/* read high 32 bits */
728 	writel(reg_addr + 4, pcie_index_offset);
729 	readl(pcie_index_offset);
730 	r |= ((u64)readl(pcie_data_offset) << 32);
731 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
732 
733 	return r;
734 }
735 
736 /**
737  * amdgpu_device_indirect_wreg - write an indirect register address
738  *
739  * @adev: amdgpu_device pointer
740  * @pcie_index: mmio register offset
741  * @pcie_data: mmio register offset
742  * @reg_addr: indirect register offset
743  * @reg_data: indirect register data
744  *
745  */
746 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
747 				 u32 pcie_index, u32 pcie_data,
748 				 u32 reg_addr, u32 reg_data)
749 {
750 	unsigned long flags;
751 	void __iomem *pcie_index_offset;
752 	void __iomem *pcie_data_offset;
753 
754 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
755 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
756 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
757 
758 	writel(reg_addr, pcie_index_offset);
759 	readl(pcie_index_offset);
760 	writel(reg_data, pcie_data_offset);
761 	readl(pcie_data_offset);
762 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
763 }
764 
765 /**
766  * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
767  *
768  * @adev: amdgpu_device pointer
769  * @pcie_index: mmio register offset
770  * @pcie_data: mmio register offset
771  * @reg_addr: indirect register offset
772  * @reg_data: indirect register data
773  *
774  */
775 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
776 				   u32 pcie_index, u32 pcie_data,
777 				   u32 reg_addr, u64 reg_data)
778 {
779 	unsigned long flags;
780 	void __iomem *pcie_index_offset;
781 	void __iomem *pcie_data_offset;
782 
783 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
784 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
785 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
786 
787 	/* write low 32 bits */
788 	writel(reg_addr, pcie_index_offset);
789 	readl(pcie_index_offset);
790 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
791 	readl(pcie_data_offset);
792 	/* write high 32 bits */
793 	writel(reg_addr + 4, pcie_index_offset);
794 	readl(pcie_index_offset);
795 	writel((u32)(reg_data >> 32), pcie_data_offset);
796 	readl(pcie_data_offset);
797 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
798 }
799 
800 /**
801  * amdgpu_invalid_rreg - dummy reg read function
802  *
803  * @adev: amdgpu_device pointer
804  * @reg: offset of register
805  *
806  * Dummy register read function.  Used for register blocks
807  * that certain asics don't have (all asics).
808  * Returns the value in the register.
809  */
810 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
811 {
812 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
813 	BUG();
814 	return 0;
815 }
816 
817 /**
818  * amdgpu_invalid_wreg - dummy reg write function
819  *
820  * @adev: amdgpu_device pointer
821  * @reg: offset of register
822  * @v: value to write to the register
823  *
824  * Dummy register read function.  Used for register blocks
825  * that certain asics don't have (all asics).
826  */
827 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
828 {
829 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
830 		  reg, v);
831 	BUG();
832 }
833 
834 /**
835  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
836  *
837  * @adev: amdgpu_device pointer
838  * @reg: offset of register
839  *
840  * Dummy register read function.  Used for register blocks
841  * that certain asics don't have (all asics).
842  * Returns the value in the register.
843  */
844 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
845 {
846 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
847 	BUG();
848 	return 0;
849 }
850 
851 /**
852  * amdgpu_invalid_wreg64 - dummy reg write function
853  *
854  * @adev: amdgpu_device pointer
855  * @reg: offset of register
856  * @v: value to write to the register
857  *
858  * Dummy register read function.  Used for register blocks
859  * that certain asics don't have (all asics).
860  */
861 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
862 {
863 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
864 		  reg, v);
865 	BUG();
866 }
867 
868 /**
869  * amdgpu_block_invalid_rreg - dummy reg read function
870  *
871  * @adev: amdgpu_device pointer
872  * @block: offset of instance
873  * @reg: offset of register
874  *
875  * Dummy register read function.  Used for register blocks
876  * that certain asics don't have (all asics).
877  * Returns the value in the register.
878  */
879 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
880 					  uint32_t block, uint32_t reg)
881 {
882 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
883 		  reg, block);
884 	BUG();
885 	return 0;
886 }
887 
888 /**
889  * amdgpu_block_invalid_wreg - dummy reg write function
890  *
891  * @adev: amdgpu_device pointer
892  * @block: offset of instance
893  * @reg: offset of register
894  * @v: value to write to the register
895  *
896  * Dummy register read function.  Used for register blocks
897  * that certain asics don't have (all asics).
898  */
899 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
900 				      uint32_t block,
901 				      uint32_t reg, uint32_t v)
902 {
903 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
904 		  reg, block, v);
905 	BUG();
906 }
907 
908 /**
909  * amdgpu_device_asic_init - Wrapper for atom asic_init
910  *
911  * @adev: amdgpu_device pointer
912  *
913  * Does any asic specific work and then calls atom asic init.
914  */
915 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
916 {
917 	amdgpu_asic_pre_asic_init(adev);
918 
919 	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
920 }
921 
922 /**
923  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
924  *
925  * @adev: amdgpu_device pointer
926  *
927  * Allocates a scratch page of VRAM for use by various things in the
928  * driver.
929  */
930 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
931 {
932 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
933 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
934 				       &adev->vram_scratch.robj,
935 				       &adev->vram_scratch.gpu_addr,
936 				       (void **)&adev->vram_scratch.ptr);
937 }
938 
939 /**
940  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
941  *
942  * @adev: amdgpu_device pointer
943  *
944  * Frees the VRAM scratch page.
945  */
946 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
947 {
948 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
949 }
950 
951 /**
952  * amdgpu_device_program_register_sequence - program an array of registers.
953  *
954  * @adev: amdgpu_device pointer
955  * @registers: pointer to the register array
956  * @array_size: size of the register array
957  *
958  * Programs an array or registers with and and or masks.
959  * This is a helper for setting golden registers.
960  */
961 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
962 					     const u32 *registers,
963 					     const u32 array_size)
964 {
965 	u32 tmp, reg, and_mask, or_mask;
966 	int i;
967 
968 	if (array_size % 3)
969 		return;
970 
971 	for (i = 0; i < array_size; i +=3) {
972 		reg = registers[i + 0];
973 		and_mask = registers[i + 1];
974 		or_mask = registers[i + 2];
975 
976 		if (and_mask == 0xffffffff) {
977 			tmp = or_mask;
978 		} else {
979 			tmp = RREG32(reg);
980 			tmp &= ~and_mask;
981 			if (adev->family >= AMDGPU_FAMILY_AI)
982 				tmp |= (or_mask & and_mask);
983 			else
984 				tmp |= or_mask;
985 		}
986 		WREG32(reg, tmp);
987 	}
988 }
989 
990 /**
991  * amdgpu_device_pci_config_reset - reset the GPU
992  *
993  * @adev: amdgpu_device pointer
994  *
995  * Resets the GPU using the pci config reset sequence.
996  * Only applicable to asics prior to vega10.
997  */
998 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
999 {
1000 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1001 }
1002 
1003 /**
1004  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1005  *
1006  * @adev: amdgpu_device pointer
1007  *
1008  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1009  */
1010 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1011 {
1012 	return pci_reset_function(adev->pdev);
1013 }
1014 
1015 /*
1016  * GPU doorbell aperture helpers function.
1017  */
1018 /**
1019  * amdgpu_device_doorbell_init - Init doorbell driver information.
1020  *
1021  * @adev: amdgpu_device pointer
1022  *
1023  * Init doorbell driver information (CIK)
1024  * Returns 0 on success, error on failure.
1025  */
1026 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1027 {
1028 
1029 	/* No doorbell on SI hardware generation */
1030 	if (adev->asic_type < CHIP_BONAIRE) {
1031 		adev->doorbell.base = 0;
1032 		adev->doorbell.size = 0;
1033 		adev->doorbell.num_doorbells = 0;
1034 		adev->doorbell.ptr = NULL;
1035 		return 0;
1036 	}
1037 
1038 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1039 		return -EINVAL;
1040 
1041 	amdgpu_asic_init_doorbell_index(adev);
1042 
1043 	/* doorbell bar mapping */
1044 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1045 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1046 
1047 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1048 					     adev->doorbell_index.max_assignment+1);
1049 	if (adev->doorbell.num_doorbells == 0)
1050 		return -EINVAL;
1051 
1052 	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
1053 	 * paging queue doorbell use the second page. The
1054 	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1055 	 * doorbells are in the first page. So with paging queue enabled,
1056 	 * the max num_doorbells should + 1 page (0x400 in dword)
1057 	 */
1058 	if (adev->asic_type >= CHIP_VEGA10)
1059 		adev->doorbell.num_doorbells += 0x400;
1060 
1061 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1062 				     adev->doorbell.num_doorbells *
1063 				     sizeof(u32));
1064 	if (adev->doorbell.ptr == NULL)
1065 		return -ENOMEM;
1066 
1067 	return 0;
1068 }
1069 
1070 /**
1071  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1072  *
1073  * @adev: amdgpu_device pointer
1074  *
1075  * Tear down doorbell driver information (CIK)
1076  */
1077 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1078 {
1079 	iounmap(adev->doorbell.ptr);
1080 	adev->doorbell.ptr = NULL;
1081 }
1082 
1083 
1084 
1085 /*
1086  * amdgpu_device_wb_*()
1087  * Writeback is the method by which the GPU updates special pages in memory
1088  * with the status of certain GPU events (fences, ring pointers,etc.).
1089  */
1090 
1091 /**
1092  * amdgpu_device_wb_fini - Disable Writeback and free memory
1093  *
1094  * @adev: amdgpu_device pointer
1095  *
1096  * Disables Writeback and frees the Writeback memory (all asics).
1097  * Used at driver shutdown.
1098  */
1099 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1100 {
1101 	if (adev->wb.wb_obj) {
1102 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1103 				      &adev->wb.gpu_addr,
1104 				      (void **)&adev->wb.wb);
1105 		adev->wb.wb_obj = NULL;
1106 	}
1107 }
1108 
1109 /**
1110  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1111  *
1112  * @adev: amdgpu_device pointer
1113  *
1114  * Initializes writeback and allocates writeback memory (all asics).
1115  * Used at driver startup.
1116  * Returns 0 on success or an -error on failure.
1117  */
1118 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1119 {
1120 	int r;
1121 
1122 	if (adev->wb.wb_obj == NULL) {
1123 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1124 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1125 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1126 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1127 					    (void **)&adev->wb.wb);
1128 		if (r) {
1129 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1130 			return r;
1131 		}
1132 
1133 		adev->wb.num_wb = AMDGPU_MAX_WB;
1134 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1135 
1136 		/* clear wb memory */
1137 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1138 	}
1139 
1140 	return 0;
1141 }
1142 
1143 /**
1144  * amdgpu_device_wb_get - Allocate a wb entry
1145  *
1146  * @adev: amdgpu_device pointer
1147  * @wb: wb index
1148  *
1149  * Allocate a wb slot for use by the driver (all asics).
1150  * Returns 0 on success or -EINVAL on failure.
1151  */
1152 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1153 {
1154 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1155 
1156 	if (offset < adev->wb.num_wb) {
1157 		__set_bit(offset, adev->wb.used);
1158 		*wb = offset << 3; /* convert to dw offset */
1159 		return 0;
1160 	} else {
1161 		return -EINVAL;
1162 	}
1163 }
1164 
1165 /**
1166  * amdgpu_device_wb_free - Free a wb entry
1167  *
1168  * @adev: amdgpu_device pointer
1169  * @wb: wb index
1170  *
1171  * Free a wb slot allocated for use by the driver (all asics)
1172  */
1173 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1174 {
1175 	wb >>= 3;
1176 	if (wb < adev->wb.num_wb)
1177 		__clear_bit(wb, adev->wb.used);
1178 }
1179 
1180 /**
1181  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1182  *
1183  * @adev: amdgpu_device pointer
1184  *
1185  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1186  * to fail, but if any of the BARs is not accessible after the size we abort
1187  * driver loading by returning -ENODEV.
1188  */
1189 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1190 {
1191 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1192 	struct pci_bus *root;
1193 	struct resource *res;
1194 	unsigned i;
1195 	u16 cmd;
1196 	int r;
1197 
1198 	/* Bypass for VF */
1199 	if (amdgpu_sriov_vf(adev))
1200 		return 0;
1201 
1202 	/* skip if the bios has already enabled large BAR */
1203 	if (adev->gmc.real_vram_size &&
1204 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1205 		return 0;
1206 
1207 	/* Check if the root BUS has 64bit memory resources */
1208 	root = adev->pdev->bus;
1209 	while (root->parent)
1210 		root = root->parent;
1211 
1212 	pci_bus_for_each_resource(root, res, i) {
1213 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1214 		    res->start > 0x100000000ull)
1215 			break;
1216 	}
1217 
1218 	/* Trying to resize is pointless without a root hub window above 4GB */
1219 	if (!res)
1220 		return 0;
1221 
1222 	/* Limit the BAR size to what is available */
1223 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1224 			rbar_size);
1225 
1226 	/* Disable memory decoding while we change the BAR addresses and size */
1227 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1228 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1229 			      cmd & ~PCI_COMMAND_MEMORY);
1230 
1231 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1232 	amdgpu_device_doorbell_fini(adev);
1233 	if (adev->asic_type >= CHIP_BONAIRE)
1234 		pci_release_resource(adev->pdev, 2);
1235 
1236 	pci_release_resource(adev->pdev, 0);
1237 
1238 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1239 	if (r == -ENOSPC)
1240 		DRM_INFO("Not enough PCI address space for a large BAR.");
1241 	else if (r && r != -ENOTSUPP)
1242 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1243 
1244 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1245 
1246 	/* When the doorbell or fb BAR isn't available we have no chance of
1247 	 * using the device.
1248 	 */
1249 	r = amdgpu_device_doorbell_init(adev);
1250 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1251 		return -ENODEV;
1252 
1253 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1254 
1255 	return 0;
1256 }
1257 
1258 /*
1259  * GPU helpers function.
1260  */
1261 /**
1262  * amdgpu_device_need_post - check if the hw need post or not
1263  *
1264  * @adev: amdgpu_device pointer
1265  *
1266  * Check if the asic has been initialized (all asics) at driver startup
1267  * or post is needed if  hw reset is performed.
1268  * Returns true if need or false if not.
1269  */
1270 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1271 {
1272 	uint32_t reg;
1273 
1274 	if (amdgpu_sriov_vf(adev))
1275 		return false;
1276 
1277 	if (amdgpu_passthrough(adev)) {
1278 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1279 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
1280 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
1281 		 * vpost executed for smc version below 22.15
1282 		 */
1283 		if (adev->asic_type == CHIP_FIJI) {
1284 			int err;
1285 			uint32_t fw_ver;
1286 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1287 			/* force vPost if error occured */
1288 			if (err)
1289 				return true;
1290 
1291 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1292 			if (fw_ver < 0x00160e00)
1293 				return true;
1294 		}
1295 	}
1296 
1297 	/* Don't post if we need to reset whole hive on init */
1298 	if (adev->gmc.xgmi.pending_reset)
1299 		return false;
1300 
1301 	if (adev->has_hw_reset) {
1302 		adev->has_hw_reset = false;
1303 		return true;
1304 	}
1305 
1306 	/* bios scratch used on CIK+ */
1307 	if (adev->asic_type >= CHIP_BONAIRE)
1308 		return amdgpu_atombios_scratch_need_asic_init(adev);
1309 
1310 	/* check MEM_SIZE for older asics */
1311 	reg = amdgpu_asic_get_config_memsize(adev);
1312 
1313 	if ((reg != 0) && (reg != 0xffffffff))
1314 		return false;
1315 
1316 	return true;
1317 }
1318 
1319 /* if we get transitioned to only one device, take VGA back */
1320 /**
1321  * amdgpu_device_vga_set_decode - enable/disable vga decode
1322  *
1323  * @pdev: PCI device pointer
1324  * @state: enable/disable vga decode
1325  *
1326  * Enable/disable vga decode (all asics).
1327  * Returns VGA resource flags.
1328  */
1329 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1330 		bool state)
1331 {
1332 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1333 	amdgpu_asic_set_vga_state(adev, state);
1334 	if (state)
1335 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1336 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1337 	else
1338 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1339 }
1340 
1341 /**
1342  * amdgpu_device_check_block_size - validate the vm block size
1343  *
1344  * @adev: amdgpu_device pointer
1345  *
1346  * Validates the vm block size specified via module parameter.
1347  * The vm block size defines number of bits in page table versus page directory,
1348  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1349  * page table and the remaining bits are in the page directory.
1350  */
1351 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1352 {
1353 	/* defines number of bits in page table versus page directory,
1354 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1355 	 * page table and the remaining bits are in the page directory */
1356 	if (amdgpu_vm_block_size == -1)
1357 		return;
1358 
1359 	if (amdgpu_vm_block_size < 9) {
1360 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1361 			 amdgpu_vm_block_size);
1362 		amdgpu_vm_block_size = -1;
1363 	}
1364 }
1365 
1366 /**
1367  * amdgpu_device_check_vm_size - validate the vm size
1368  *
1369  * @adev: amdgpu_device pointer
1370  *
1371  * Validates the vm size in GB specified via module parameter.
1372  * The VM size is the size of the GPU virtual memory space in GB.
1373  */
1374 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1375 {
1376 	/* no need to check the default value */
1377 	if (amdgpu_vm_size == -1)
1378 		return;
1379 
1380 	if (amdgpu_vm_size < 1) {
1381 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1382 			 amdgpu_vm_size);
1383 		amdgpu_vm_size = -1;
1384 	}
1385 }
1386 
1387 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1388 {
1389 	struct sysinfo si;
1390 	bool is_os_64 = (sizeof(void *) == 8);
1391 	uint64_t total_memory;
1392 	uint64_t dram_size_seven_GB = 0x1B8000000;
1393 	uint64_t dram_size_three_GB = 0xB8000000;
1394 
1395 	if (amdgpu_smu_memory_pool_size == 0)
1396 		return;
1397 
1398 	if (!is_os_64) {
1399 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1400 		goto def_value;
1401 	}
1402 	si_meminfo(&si);
1403 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1404 
1405 	if ((amdgpu_smu_memory_pool_size == 1) ||
1406 		(amdgpu_smu_memory_pool_size == 2)) {
1407 		if (total_memory < dram_size_three_GB)
1408 			goto def_value1;
1409 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1410 		(amdgpu_smu_memory_pool_size == 8)) {
1411 		if (total_memory < dram_size_seven_GB)
1412 			goto def_value1;
1413 	} else {
1414 		DRM_WARN("Smu memory pool size not supported\n");
1415 		goto def_value;
1416 	}
1417 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1418 
1419 	return;
1420 
1421 def_value1:
1422 	DRM_WARN("No enough system memory\n");
1423 def_value:
1424 	adev->pm.smu_prv_buffer_size = 0;
1425 }
1426 
1427 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1428 {
1429 	if (!(adev->flags & AMD_IS_APU) ||
1430 	    adev->asic_type < CHIP_RAVEN)
1431 		return 0;
1432 
1433 	switch (adev->asic_type) {
1434 	case CHIP_RAVEN:
1435 		if (adev->pdev->device == 0x15dd)
1436 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1437 		if (adev->pdev->device == 0x15d8)
1438 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1439 		break;
1440 	case CHIP_RENOIR:
1441 		if ((adev->pdev->device == 0x1636) ||
1442 		    (adev->pdev->device == 0x164c))
1443 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1444 		else
1445 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1446 		break;
1447 	case CHIP_VANGOGH:
1448 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1449 		break;
1450 	case CHIP_YELLOW_CARP:
1451 		break;
1452 	case CHIP_CYAN_SKILLFISH:
1453 		if (adev->pdev->device == 0x13FE)
1454 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1455 		break;
1456 	default:
1457 		break;
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  * amdgpu_device_check_arguments - validate module params
1465  *
1466  * @adev: amdgpu_device pointer
1467  *
1468  * Validates certain module parameters and updates
1469  * the associated values used by the driver (all asics).
1470  */
1471 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1472 {
1473 	if (amdgpu_sched_jobs < 4) {
1474 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1475 			 amdgpu_sched_jobs);
1476 		amdgpu_sched_jobs = 4;
1477 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1478 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1479 			 amdgpu_sched_jobs);
1480 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1481 	}
1482 
1483 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1484 		/* gart size must be greater or equal to 32M */
1485 		dev_warn(adev->dev, "gart size (%d) too small\n",
1486 			 amdgpu_gart_size);
1487 		amdgpu_gart_size = -1;
1488 	}
1489 
1490 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1491 		/* gtt size must be greater or equal to 32M */
1492 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1493 				 amdgpu_gtt_size);
1494 		amdgpu_gtt_size = -1;
1495 	}
1496 
1497 	/* valid range is between 4 and 9 inclusive */
1498 	if (amdgpu_vm_fragment_size != -1 &&
1499 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1500 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1501 		amdgpu_vm_fragment_size = -1;
1502 	}
1503 
1504 	if (amdgpu_sched_hw_submission < 2) {
1505 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1506 			 amdgpu_sched_hw_submission);
1507 		amdgpu_sched_hw_submission = 2;
1508 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1509 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1510 			 amdgpu_sched_hw_submission);
1511 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1512 	}
1513 
1514 	amdgpu_device_check_smu_prv_buffer_size(adev);
1515 
1516 	amdgpu_device_check_vm_size(adev);
1517 
1518 	amdgpu_device_check_block_size(adev);
1519 
1520 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1521 
1522 	amdgpu_gmc_tmz_set(adev);
1523 
1524 	amdgpu_gmc_noretry_set(adev);
1525 
1526 	return 0;
1527 }
1528 
1529 /**
1530  * amdgpu_switcheroo_set_state - set switcheroo state
1531  *
1532  * @pdev: pci dev pointer
1533  * @state: vga_switcheroo state
1534  *
1535  * Callback for the switcheroo driver.  Suspends or resumes the
1536  * the asics before or after it is powered up using ACPI methods.
1537  */
1538 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1539 					enum vga_switcheroo_state state)
1540 {
1541 	struct drm_device *dev = pci_get_drvdata(pdev);
1542 	int r;
1543 
1544 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1545 		return;
1546 
1547 	if (state == VGA_SWITCHEROO_ON) {
1548 		pr_info("switched on\n");
1549 		/* don't suspend or resume card normally */
1550 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1551 
1552 		pci_set_power_state(pdev, PCI_D0);
1553 		amdgpu_device_load_pci_state(pdev);
1554 		r = pci_enable_device(pdev);
1555 		if (r)
1556 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1557 		amdgpu_device_resume(dev, true);
1558 
1559 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1560 	} else {
1561 		pr_info("switched off\n");
1562 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1563 		amdgpu_device_suspend(dev, true);
1564 		amdgpu_device_cache_pci_state(pdev);
1565 		/* Shut down the device */
1566 		pci_disable_device(pdev);
1567 		pci_set_power_state(pdev, PCI_D3cold);
1568 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1569 	}
1570 }
1571 
1572 /**
1573  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1574  *
1575  * @pdev: pci dev pointer
1576  *
1577  * Callback for the switcheroo driver.  Check of the switcheroo
1578  * state can be changed.
1579  * Returns true if the state can be changed, false if not.
1580  */
1581 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1582 {
1583 	struct drm_device *dev = pci_get_drvdata(pdev);
1584 
1585 	/*
1586 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1587 	* locking inversion with the driver load path. And the access here is
1588 	* completely racy anyway. So don't bother with locking for now.
1589 	*/
1590 	return atomic_read(&dev->open_count) == 0;
1591 }
1592 
1593 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1594 	.set_gpu_state = amdgpu_switcheroo_set_state,
1595 	.reprobe = NULL,
1596 	.can_switch = amdgpu_switcheroo_can_switch,
1597 };
1598 
1599 /**
1600  * amdgpu_device_ip_set_clockgating_state - set the CG state
1601  *
1602  * @dev: amdgpu_device pointer
1603  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1604  * @state: clockgating state (gate or ungate)
1605  *
1606  * Sets the requested clockgating state for all instances of
1607  * the hardware IP specified.
1608  * Returns the error code from the last instance.
1609  */
1610 int amdgpu_device_ip_set_clockgating_state(void *dev,
1611 					   enum amd_ip_block_type block_type,
1612 					   enum amd_clockgating_state state)
1613 {
1614 	struct amdgpu_device *adev = dev;
1615 	int i, r = 0;
1616 
1617 	for (i = 0; i < adev->num_ip_blocks; i++) {
1618 		if (!adev->ip_blocks[i].status.valid)
1619 			continue;
1620 		if (adev->ip_blocks[i].version->type != block_type)
1621 			continue;
1622 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1623 			continue;
1624 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1625 			(void *)adev, state);
1626 		if (r)
1627 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1628 				  adev->ip_blocks[i].version->funcs->name, r);
1629 	}
1630 	return r;
1631 }
1632 
1633 /**
1634  * amdgpu_device_ip_set_powergating_state - set the PG state
1635  *
1636  * @dev: amdgpu_device pointer
1637  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1638  * @state: powergating state (gate or ungate)
1639  *
1640  * Sets the requested powergating state for all instances of
1641  * the hardware IP specified.
1642  * Returns the error code from the last instance.
1643  */
1644 int amdgpu_device_ip_set_powergating_state(void *dev,
1645 					   enum amd_ip_block_type block_type,
1646 					   enum amd_powergating_state state)
1647 {
1648 	struct amdgpu_device *adev = dev;
1649 	int i, r = 0;
1650 
1651 	for (i = 0; i < adev->num_ip_blocks; i++) {
1652 		if (!adev->ip_blocks[i].status.valid)
1653 			continue;
1654 		if (adev->ip_blocks[i].version->type != block_type)
1655 			continue;
1656 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1657 			continue;
1658 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1659 			(void *)adev, state);
1660 		if (r)
1661 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1662 				  adev->ip_blocks[i].version->funcs->name, r);
1663 	}
1664 	return r;
1665 }
1666 
1667 /**
1668  * amdgpu_device_ip_get_clockgating_state - get the CG state
1669  *
1670  * @adev: amdgpu_device pointer
1671  * @flags: clockgating feature flags
1672  *
1673  * Walks the list of IPs on the device and updates the clockgating
1674  * flags for each IP.
1675  * Updates @flags with the feature flags for each hardware IP where
1676  * clockgating is enabled.
1677  */
1678 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1679 					    u32 *flags)
1680 {
1681 	int i;
1682 
1683 	for (i = 0; i < adev->num_ip_blocks; i++) {
1684 		if (!adev->ip_blocks[i].status.valid)
1685 			continue;
1686 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1687 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1688 	}
1689 }
1690 
1691 /**
1692  * amdgpu_device_ip_wait_for_idle - wait for idle
1693  *
1694  * @adev: amdgpu_device pointer
1695  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1696  *
1697  * Waits for the request hardware IP to be idle.
1698  * Returns 0 for success or a negative error code on failure.
1699  */
1700 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1701 				   enum amd_ip_block_type block_type)
1702 {
1703 	int i, r;
1704 
1705 	for (i = 0; i < adev->num_ip_blocks; i++) {
1706 		if (!adev->ip_blocks[i].status.valid)
1707 			continue;
1708 		if (adev->ip_blocks[i].version->type == block_type) {
1709 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1710 			if (r)
1711 				return r;
1712 			break;
1713 		}
1714 	}
1715 	return 0;
1716 
1717 }
1718 
1719 /**
1720  * amdgpu_device_ip_is_idle - is the hardware IP idle
1721  *
1722  * @adev: amdgpu_device pointer
1723  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1724  *
1725  * Check if the hardware IP is idle or not.
1726  * Returns true if it the IP is idle, false if not.
1727  */
1728 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1729 			      enum amd_ip_block_type block_type)
1730 {
1731 	int i;
1732 
1733 	for (i = 0; i < adev->num_ip_blocks; i++) {
1734 		if (!adev->ip_blocks[i].status.valid)
1735 			continue;
1736 		if (adev->ip_blocks[i].version->type == block_type)
1737 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1738 	}
1739 	return true;
1740 
1741 }
1742 
1743 /**
1744  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1745  *
1746  * @adev: amdgpu_device pointer
1747  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1748  *
1749  * Returns a pointer to the hardware IP block structure
1750  * if it exists for the asic, otherwise NULL.
1751  */
1752 struct amdgpu_ip_block *
1753 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1754 			      enum amd_ip_block_type type)
1755 {
1756 	int i;
1757 
1758 	for (i = 0; i < adev->num_ip_blocks; i++)
1759 		if (adev->ip_blocks[i].version->type == type)
1760 			return &adev->ip_blocks[i];
1761 
1762 	return NULL;
1763 }
1764 
1765 /**
1766  * amdgpu_device_ip_block_version_cmp
1767  *
1768  * @adev: amdgpu_device pointer
1769  * @type: enum amd_ip_block_type
1770  * @major: major version
1771  * @minor: minor version
1772  *
1773  * return 0 if equal or greater
1774  * return 1 if smaller or the ip_block doesn't exist
1775  */
1776 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1777 				       enum amd_ip_block_type type,
1778 				       u32 major, u32 minor)
1779 {
1780 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1781 
1782 	if (ip_block && ((ip_block->version->major > major) ||
1783 			((ip_block->version->major == major) &&
1784 			(ip_block->version->minor >= minor))))
1785 		return 0;
1786 
1787 	return 1;
1788 }
1789 
1790 /**
1791  * amdgpu_device_ip_block_add
1792  *
1793  * @adev: amdgpu_device pointer
1794  * @ip_block_version: pointer to the IP to add
1795  *
1796  * Adds the IP block driver information to the collection of IPs
1797  * on the asic.
1798  */
1799 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1800 			       const struct amdgpu_ip_block_version *ip_block_version)
1801 {
1802 	if (!ip_block_version)
1803 		return -EINVAL;
1804 
1805 	switch (ip_block_version->type) {
1806 	case AMD_IP_BLOCK_TYPE_VCN:
1807 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1808 			return 0;
1809 		break;
1810 	case AMD_IP_BLOCK_TYPE_JPEG:
1811 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1812 			return 0;
1813 		break;
1814 	default:
1815 		break;
1816 	}
1817 
1818 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1819 		  ip_block_version->funcs->name);
1820 
1821 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1822 
1823 	return 0;
1824 }
1825 
1826 /**
1827  * amdgpu_device_enable_virtual_display - enable virtual display feature
1828  *
1829  * @adev: amdgpu_device pointer
1830  *
1831  * Enabled the virtual display feature if the user has enabled it via
1832  * the module parameter virtual_display.  This feature provides a virtual
1833  * display hardware on headless boards or in virtualized environments.
1834  * This function parses and validates the configuration string specified by
1835  * the user and configues the virtual display configuration (number of
1836  * virtual connectors, crtcs, etc.) specified.
1837  */
1838 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1839 {
1840 	adev->enable_virtual_display = false;
1841 
1842 	if (amdgpu_virtual_display) {
1843 		const char *pci_address_name = pci_name(adev->pdev);
1844 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1845 
1846 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1847 		pciaddstr_tmp = pciaddstr;
1848 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1849 			pciaddname = strsep(&pciaddname_tmp, ",");
1850 			if (!strcmp("all", pciaddname)
1851 			    || !strcmp(pci_address_name, pciaddname)) {
1852 				long num_crtc;
1853 				int res = -1;
1854 
1855 				adev->enable_virtual_display = true;
1856 
1857 				if (pciaddname_tmp)
1858 					res = kstrtol(pciaddname_tmp, 10,
1859 						      &num_crtc);
1860 
1861 				if (!res) {
1862 					if (num_crtc < 1)
1863 						num_crtc = 1;
1864 					if (num_crtc > 6)
1865 						num_crtc = 6;
1866 					adev->mode_info.num_crtc = num_crtc;
1867 				} else {
1868 					adev->mode_info.num_crtc = 1;
1869 				}
1870 				break;
1871 			}
1872 		}
1873 
1874 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1875 			 amdgpu_virtual_display, pci_address_name,
1876 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1877 
1878 		kfree(pciaddstr);
1879 	}
1880 }
1881 
1882 /**
1883  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1884  *
1885  * @adev: amdgpu_device pointer
1886  *
1887  * Parses the asic configuration parameters specified in the gpu info
1888  * firmware and makes them availale to the driver for use in configuring
1889  * the asic.
1890  * Returns 0 on success, -EINVAL on failure.
1891  */
1892 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1893 {
1894 	const char *chip_name;
1895 	char fw_name[40];
1896 	int err;
1897 	const struct gpu_info_firmware_header_v1_0 *hdr;
1898 
1899 	adev->firmware.gpu_info_fw = NULL;
1900 
1901 	if (adev->mman.discovery_bin) {
1902 		amdgpu_discovery_get_gfx_info(adev);
1903 
1904 		/*
1905 		 * FIXME: The bounding box is still needed by Navi12, so
1906 		 * temporarily read it from gpu_info firmware. Should be droped
1907 		 * when DAL no longer needs it.
1908 		 */
1909 		if (adev->asic_type != CHIP_NAVI12)
1910 			return 0;
1911 	}
1912 
1913 	switch (adev->asic_type) {
1914 #ifdef CONFIG_DRM_AMDGPU_SI
1915 	case CHIP_VERDE:
1916 	case CHIP_TAHITI:
1917 	case CHIP_PITCAIRN:
1918 	case CHIP_OLAND:
1919 	case CHIP_HAINAN:
1920 #endif
1921 #ifdef CONFIG_DRM_AMDGPU_CIK
1922 	case CHIP_BONAIRE:
1923 	case CHIP_HAWAII:
1924 	case CHIP_KAVERI:
1925 	case CHIP_KABINI:
1926 	case CHIP_MULLINS:
1927 #endif
1928 	case CHIP_TOPAZ:
1929 	case CHIP_TONGA:
1930 	case CHIP_FIJI:
1931 	case CHIP_POLARIS10:
1932 	case CHIP_POLARIS11:
1933 	case CHIP_POLARIS12:
1934 	case CHIP_VEGAM:
1935 	case CHIP_CARRIZO:
1936 	case CHIP_STONEY:
1937 	case CHIP_VEGA20:
1938 	case CHIP_ALDEBARAN:
1939 	case CHIP_SIENNA_CICHLID:
1940 	case CHIP_NAVY_FLOUNDER:
1941 	case CHIP_DIMGREY_CAVEFISH:
1942 	case CHIP_BEIGE_GOBY:
1943 	default:
1944 		return 0;
1945 	case CHIP_VEGA10:
1946 		chip_name = "vega10";
1947 		break;
1948 	case CHIP_VEGA12:
1949 		chip_name = "vega12";
1950 		break;
1951 	case CHIP_RAVEN:
1952 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1953 			chip_name = "raven2";
1954 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1955 			chip_name = "picasso";
1956 		else
1957 			chip_name = "raven";
1958 		break;
1959 	case CHIP_ARCTURUS:
1960 		chip_name = "arcturus";
1961 		break;
1962 	case CHIP_RENOIR:
1963 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1964 			chip_name = "renoir";
1965 		else
1966 			chip_name = "green_sardine";
1967 		break;
1968 	case CHIP_NAVI10:
1969 		chip_name = "navi10";
1970 		break;
1971 	case CHIP_NAVI14:
1972 		chip_name = "navi14";
1973 		break;
1974 	case CHIP_NAVI12:
1975 		chip_name = "navi12";
1976 		break;
1977 	case CHIP_VANGOGH:
1978 		chip_name = "vangogh";
1979 		break;
1980 	case CHIP_YELLOW_CARP:
1981 		chip_name = "yellow_carp";
1982 		break;
1983 	}
1984 
1985 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1986 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1987 	if (err) {
1988 		dev_err(adev->dev,
1989 			"Failed to load gpu_info firmware \"%s\"\n",
1990 			fw_name);
1991 		goto out;
1992 	}
1993 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1994 	if (err) {
1995 		dev_err(adev->dev,
1996 			"Failed to validate gpu_info firmware \"%s\"\n",
1997 			fw_name);
1998 		goto out;
1999 	}
2000 
2001 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2002 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2003 
2004 	switch (hdr->version_major) {
2005 	case 1:
2006 	{
2007 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2008 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2009 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2010 
2011 		/*
2012 		 * Should be droped when DAL no longer needs it.
2013 		 */
2014 		if (adev->asic_type == CHIP_NAVI12)
2015 			goto parse_soc_bounding_box;
2016 
2017 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2018 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2019 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2020 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2021 		adev->gfx.config.max_texture_channel_caches =
2022 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2023 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2024 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2025 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2026 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2027 		adev->gfx.config.double_offchip_lds_buf =
2028 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2029 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2030 		adev->gfx.cu_info.max_waves_per_simd =
2031 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2032 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2033 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2034 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2035 		if (hdr->version_minor >= 1) {
2036 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2037 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2038 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2039 			adev->gfx.config.num_sc_per_sh =
2040 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2041 			adev->gfx.config.num_packer_per_sc =
2042 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2043 		}
2044 
2045 parse_soc_bounding_box:
2046 		/*
2047 		 * soc bounding box info is not integrated in disocovery table,
2048 		 * we always need to parse it from gpu info firmware if needed.
2049 		 */
2050 		if (hdr->version_minor == 2) {
2051 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2052 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2053 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2054 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2055 		}
2056 		break;
2057 	}
2058 	default:
2059 		dev_err(adev->dev,
2060 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2061 		err = -EINVAL;
2062 		goto out;
2063 	}
2064 out:
2065 	return err;
2066 }
2067 
2068 /**
2069  * amdgpu_device_ip_early_init - run early init for hardware IPs
2070  *
2071  * @adev: amdgpu_device pointer
2072  *
2073  * Early initialization pass for hardware IPs.  The hardware IPs that make
2074  * up each asic are discovered each IP's early_init callback is run.  This
2075  * is the first stage in initializing the asic.
2076  * Returns 0 on success, negative error code on failure.
2077  */
2078 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2079 {
2080 	struct drm_device *dev = adev_to_drm(adev);
2081 	struct pci_dev *parent;
2082 	int i, r;
2083 
2084 	amdgpu_device_enable_virtual_display(adev);
2085 
2086 	if (amdgpu_sriov_vf(adev)) {
2087 		r = amdgpu_virt_request_full_gpu(adev, true);
2088 		if (r)
2089 			return r;
2090 	}
2091 
2092 	switch (adev->asic_type) {
2093 #ifdef CONFIG_DRM_AMDGPU_SI
2094 	case CHIP_VERDE:
2095 	case CHIP_TAHITI:
2096 	case CHIP_PITCAIRN:
2097 	case CHIP_OLAND:
2098 	case CHIP_HAINAN:
2099 		adev->family = AMDGPU_FAMILY_SI;
2100 		r = si_set_ip_blocks(adev);
2101 		if (r)
2102 			return r;
2103 		break;
2104 #endif
2105 #ifdef CONFIG_DRM_AMDGPU_CIK
2106 	case CHIP_BONAIRE:
2107 	case CHIP_HAWAII:
2108 	case CHIP_KAVERI:
2109 	case CHIP_KABINI:
2110 	case CHIP_MULLINS:
2111 		if (adev->flags & AMD_IS_APU)
2112 			adev->family = AMDGPU_FAMILY_KV;
2113 		else
2114 			adev->family = AMDGPU_FAMILY_CI;
2115 
2116 		r = cik_set_ip_blocks(adev);
2117 		if (r)
2118 			return r;
2119 		break;
2120 #endif
2121 	case CHIP_TOPAZ:
2122 	case CHIP_TONGA:
2123 	case CHIP_FIJI:
2124 	case CHIP_POLARIS10:
2125 	case CHIP_POLARIS11:
2126 	case CHIP_POLARIS12:
2127 	case CHIP_VEGAM:
2128 	case CHIP_CARRIZO:
2129 	case CHIP_STONEY:
2130 		if (adev->flags & AMD_IS_APU)
2131 			adev->family = AMDGPU_FAMILY_CZ;
2132 		else
2133 			adev->family = AMDGPU_FAMILY_VI;
2134 
2135 		r = vi_set_ip_blocks(adev);
2136 		if (r)
2137 			return r;
2138 		break;
2139 	default:
2140 		r = amdgpu_discovery_set_ip_blocks(adev);
2141 		if (r)
2142 			return r;
2143 		break;
2144 	}
2145 
2146 	if (amdgpu_has_atpx() &&
2147 	    (amdgpu_is_atpx_hybrid() ||
2148 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2149 	    ((adev->flags & AMD_IS_APU) == 0) &&
2150 	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
2151 		adev->flags |= AMD_IS_PX;
2152 
2153 	parent = pci_upstream_bridge(adev->pdev);
2154 	adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2155 
2156 	amdgpu_amdkfd_device_probe(adev);
2157 
2158 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2159 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2160 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2161 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2162 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2163 
2164 	for (i = 0; i < adev->num_ip_blocks; i++) {
2165 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2166 			DRM_ERROR("disabled ip block: %d <%s>\n",
2167 				  i, adev->ip_blocks[i].version->funcs->name);
2168 			adev->ip_blocks[i].status.valid = false;
2169 		} else {
2170 			if (adev->ip_blocks[i].version->funcs->early_init) {
2171 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2172 				if (r == -ENOENT) {
2173 					adev->ip_blocks[i].status.valid = false;
2174 				} else if (r) {
2175 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2176 						  adev->ip_blocks[i].version->funcs->name, r);
2177 					return r;
2178 				} else {
2179 					adev->ip_blocks[i].status.valid = true;
2180 				}
2181 			} else {
2182 				adev->ip_blocks[i].status.valid = true;
2183 			}
2184 		}
2185 		/* get the vbios after the asic_funcs are set up */
2186 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2187 			r = amdgpu_device_parse_gpu_info_fw(adev);
2188 			if (r)
2189 				return r;
2190 
2191 			/* Read BIOS */
2192 			if (!amdgpu_get_bios(adev))
2193 				return -EINVAL;
2194 
2195 			r = amdgpu_atombios_init(adev);
2196 			if (r) {
2197 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2198 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2199 				return r;
2200 			}
2201 
2202 			/*get pf2vf msg info at it's earliest time*/
2203 			if (amdgpu_sriov_vf(adev))
2204 				amdgpu_virt_init_data_exchange(adev);
2205 
2206 		}
2207 	}
2208 
2209 	adev->cg_flags &= amdgpu_cg_mask;
2210 	adev->pg_flags &= amdgpu_pg_mask;
2211 
2212 	return 0;
2213 }
2214 
2215 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2216 {
2217 	int i, r;
2218 
2219 	for (i = 0; i < adev->num_ip_blocks; i++) {
2220 		if (!adev->ip_blocks[i].status.sw)
2221 			continue;
2222 		if (adev->ip_blocks[i].status.hw)
2223 			continue;
2224 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2225 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2226 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2227 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2228 			if (r) {
2229 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2230 					  adev->ip_blocks[i].version->funcs->name, r);
2231 				return r;
2232 			}
2233 			adev->ip_blocks[i].status.hw = true;
2234 		}
2235 	}
2236 
2237 	return 0;
2238 }
2239 
2240 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2241 {
2242 	int i, r;
2243 
2244 	for (i = 0; i < adev->num_ip_blocks; i++) {
2245 		if (!adev->ip_blocks[i].status.sw)
2246 			continue;
2247 		if (adev->ip_blocks[i].status.hw)
2248 			continue;
2249 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2250 		if (r) {
2251 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2252 				  adev->ip_blocks[i].version->funcs->name, r);
2253 			return r;
2254 		}
2255 		adev->ip_blocks[i].status.hw = true;
2256 	}
2257 
2258 	return 0;
2259 }
2260 
2261 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2262 {
2263 	int r = 0;
2264 	int i;
2265 	uint32_t smu_version;
2266 
2267 	if (adev->asic_type >= CHIP_VEGA10) {
2268 		for (i = 0; i < adev->num_ip_blocks; i++) {
2269 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2270 				continue;
2271 
2272 			if (!adev->ip_blocks[i].status.sw)
2273 				continue;
2274 
2275 			/* no need to do the fw loading again if already done*/
2276 			if (adev->ip_blocks[i].status.hw == true)
2277 				break;
2278 
2279 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2280 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2281 				if (r) {
2282 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2283 							  adev->ip_blocks[i].version->funcs->name, r);
2284 					return r;
2285 				}
2286 			} else {
2287 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2288 				if (r) {
2289 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2290 							  adev->ip_blocks[i].version->funcs->name, r);
2291 					return r;
2292 				}
2293 			}
2294 
2295 			adev->ip_blocks[i].status.hw = true;
2296 			break;
2297 		}
2298 	}
2299 
2300 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2301 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2302 
2303 	return r;
2304 }
2305 
2306 /**
2307  * amdgpu_device_ip_init - run init for hardware IPs
2308  *
2309  * @adev: amdgpu_device pointer
2310  *
2311  * Main initialization pass for hardware IPs.  The list of all the hardware
2312  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2313  * are run.  sw_init initializes the software state associated with each IP
2314  * and hw_init initializes the hardware associated with each IP.
2315  * Returns 0 on success, negative error code on failure.
2316  */
2317 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2318 {
2319 	int i, r;
2320 
2321 	r = amdgpu_ras_init(adev);
2322 	if (r)
2323 		return r;
2324 
2325 	for (i = 0; i < adev->num_ip_blocks; i++) {
2326 		if (!adev->ip_blocks[i].status.valid)
2327 			continue;
2328 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2329 		if (r) {
2330 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2331 				  adev->ip_blocks[i].version->funcs->name, r);
2332 			goto init_failed;
2333 		}
2334 		adev->ip_blocks[i].status.sw = true;
2335 
2336 		/* need to do gmc hw init early so we can allocate gpu mem */
2337 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2338 			/* Try to reserve bad pages early */
2339 			if (amdgpu_sriov_vf(adev))
2340 				amdgpu_virt_exchange_data(adev);
2341 
2342 			r = amdgpu_device_vram_scratch_init(adev);
2343 			if (r) {
2344 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2345 				goto init_failed;
2346 			}
2347 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2348 			if (r) {
2349 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2350 				goto init_failed;
2351 			}
2352 			r = amdgpu_device_wb_init(adev);
2353 			if (r) {
2354 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2355 				goto init_failed;
2356 			}
2357 			adev->ip_blocks[i].status.hw = true;
2358 
2359 			/* right after GMC hw init, we create CSA */
2360 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2361 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2362 								AMDGPU_GEM_DOMAIN_VRAM,
2363 								AMDGPU_CSA_SIZE);
2364 				if (r) {
2365 					DRM_ERROR("allocate CSA failed %d\n", r);
2366 					goto init_failed;
2367 				}
2368 			}
2369 		}
2370 	}
2371 
2372 	if (amdgpu_sriov_vf(adev))
2373 		amdgpu_virt_init_data_exchange(adev);
2374 
2375 	r = amdgpu_ib_pool_init(adev);
2376 	if (r) {
2377 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2378 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2379 		goto init_failed;
2380 	}
2381 
2382 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2383 	if (r)
2384 		goto init_failed;
2385 
2386 	r = amdgpu_device_ip_hw_init_phase1(adev);
2387 	if (r)
2388 		goto init_failed;
2389 
2390 	r = amdgpu_device_fw_loading(adev);
2391 	if (r)
2392 		goto init_failed;
2393 
2394 	r = amdgpu_device_ip_hw_init_phase2(adev);
2395 	if (r)
2396 		goto init_failed;
2397 
2398 	/*
2399 	 * retired pages will be loaded from eeprom and reserved here,
2400 	 * it should be called after amdgpu_device_ip_hw_init_phase2  since
2401 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2402 	 * for I2C communication which only true at this point.
2403 	 *
2404 	 * amdgpu_ras_recovery_init may fail, but the upper only cares the
2405 	 * failure from bad gpu situation and stop amdgpu init process
2406 	 * accordingly. For other failed cases, it will still release all
2407 	 * the resource and print error message, rather than returning one
2408 	 * negative value to upper level.
2409 	 *
2410 	 * Note: theoretically, this should be called before all vram allocations
2411 	 * to protect retired page from abusing
2412 	 */
2413 	r = amdgpu_ras_recovery_init(adev);
2414 	if (r)
2415 		goto init_failed;
2416 
2417 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2418 		amdgpu_xgmi_add_device(adev);
2419 
2420 	/* Don't init kfd if whole hive need to be reset during init */
2421 	if (!adev->gmc.xgmi.pending_reset)
2422 		amdgpu_amdkfd_device_init(adev);
2423 
2424 	amdgpu_fru_get_product_info(adev);
2425 
2426 init_failed:
2427 	if (amdgpu_sriov_vf(adev))
2428 		amdgpu_virt_release_full_gpu(adev, true);
2429 
2430 	return r;
2431 }
2432 
2433 /**
2434  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2435  *
2436  * @adev: amdgpu_device pointer
2437  *
2438  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2439  * this function before a GPU reset.  If the value is retained after a
2440  * GPU reset, VRAM has not been lost.  Some GPU resets may destry VRAM contents.
2441  */
2442 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2443 {
2444 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2445 }
2446 
2447 /**
2448  * amdgpu_device_check_vram_lost - check if vram is valid
2449  *
2450  * @adev: amdgpu_device pointer
2451  *
2452  * Checks the reset magic value written to the gart pointer in VRAM.
2453  * The driver calls this after a GPU reset to see if the contents of
2454  * VRAM is lost or now.
2455  * returns true if vram is lost, false if not.
2456  */
2457 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2458 {
2459 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2460 			AMDGPU_RESET_MAGIC_NUM))
2461 		return true;
2462 
2463 	if (!amdgpu_in_reset(adev))
2464 		return false;
2465 
2466 	/*
2467 	 * For all ASICs with baco/mode1 reset, the VRAM is
2468 	 * always assumed to be lost.
2469 	 */
2470 	switch (amdgpu_asic_reset_method(adev)) {
2471 	case AMD_RESET_METHOD_BACO:
2472 	case AMD_RESET_METHOD_MODE1:
2473 		return true;
2474 	default:
2475 		return false;
2476 	}
2477 }
2478 
2479 /**
2480  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2481  *
2482  * @adev: amdgpu_device pointer
2483  * @state: clockgating state (gate or ungate)
2484  *
2485  * The list of all the hardware IPs that make up the asic is walked and the
2486  * set_clockgating_state callbacks are run.
2487  * Late initialization pass enabling clockgating for hardware IPs.
2488  * Fini or suspend, pass disabling clockgating for hardware IPs.
2489  * Returns 0 on success, negative error code on failure.
2490  */
2491 
2492 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2493 			       enum amd_clockgating_state state)
2494 {
2495 	int i, j, r;
2496 
2497 	if (amdgpu_emu_mode == 1)
2498 		return 0;
2499 
2500 	for (j = 0; j < adev->num_ip_blocks; j++) {
2501 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2502 		if (!adev->ip_blocks[i].status.late_initialized)
2503 			continue;
2504 		/* skip CG for GFX on S0ix */
2505 		if (adev->in_s0ix &&
2506 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2507 			continue;
2508 		/* skip CG for VCE/UVD, it's handled specially */
2509 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2510 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2511 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2512 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2513 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2514 			/* enable clockgating to save power */
2515 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2516 										     state);
2517 			if (r) {
2518 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2519 					  adev->ip_blocks[i].version->funcs->name, r);
2520 				return r;
2521 			}
2522 		}
2523 	}
2524 
2525 	return 0;
2526 }
2527 
2528 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2529 			       enum amd_powergating_state state)
2530 {
2531 	int i, j, r;
2532 
2533 	if (amdgpu_emu_mode == 1)
2534 		return 0;
2535 
2536 	for (j = 0; j < adev->num_ip_blocks; j++) {
2537 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2538 		if (!adev->ip_blocks[i].status.late_initialized)
2539 			continue;
2540 		/* skip PG for GFX on S0ix */
2541 		if (adev->in_s0ix &&
2542 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2543 			continue;
2544 		/* skip CG for VCE/UVD, it's handled specially */
2545 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2546 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2547 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2548 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2549 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2550 			/* enable powergating to save power */
2551 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2552 											state);
2553 			if (r) {
2554 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2555 					  adev->ip_blocks[i].version->funcs->name, r);
2556 				return r;
2557 			}
2558 		}
2559 	}
2560 	return 0;
2561 }
2562 
2563 static int amdgpu_device_enable_mgpu_fan_boost(void)
2564 {
2565 	struct amdgpu_gpu_instance *gpu_ins;
2566 	struct amdgpu_device *adev;
2567 	int i, ret = 0;
2568 
2569 	mutex_lock(&mgpu_info.mutex);
2570 
2571 	/*
2572 	 * MGPU fan boost feature should be enabled
2573 	 * only when there are two or more dGPUs in
2574 	 * the system
2575 	 */
2576 	if (mgpu_info.num_dgpu < 2)
2577 		goto out;
2578 
2579 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2580 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2581 		adev = gpu_ins->adev;
2582 		if (!(adev->flags & AMD_IS_APU) &&
2583 		    !gpu_ins->mgpu_fan_enabled) {
2584 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2585 			if (ret)
2586 				break;
2587 
2588 			gpu_ins->mgpu_fan_enabled = 1;
2589 		}
2590 	}
2591 
2592 out:
2593 	mutex_unlock(&mgpu_info.mutex);
2594 
2595 	return ret;
2596 }
2597 
2598 /**
2599  * amdgpu_device_ip_late_init - run late init for hardware IPs
2600  *
2601  * @adev: amdgpu_device pointer
2602  *
2603  * Late initialization pass for hardware IPs.  The list of all the hardware
2604  * IPs that make up the asic is walked and the late_init callbacks are run.
2605  * late_init covers any special initialization that an IP requires
2606  * after all of the have been initialized or something that needs to happen
2607  * late in the init process.
2608  * Returns 0 on success, negative error code on failure.
2609  */
2610 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2611 {
2612 	struct amdgpu_gpu_instance *gpu_instance;
2613 	int i = 0, r;
2614 
2615 	for (i = 0; i < adev->num_ip_blocks; i++) {
2616 		if (!adev->ip_blocks[i].status.hw)
2617 			continue;
2618 		if (adev->ip_blocks[i].version->funcs->late_init) {
2619 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2620 			if (r) {
2621 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2622 					  adev->ip_blocks[i].version->funcs->name, r);
2623 				return r;
2624 			}
2625 		}
2626 		adev->ip_blocks[i].status.late_initialized = true;
2627 	}
2628 
2629 	amdgpu_ras_set_error_query_ready(adev, true);
2630 
2631 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2632 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2633 
2634 	amdgpu_device_fill_reset_magic(adev);
2635 
2636 	r = amdgpu_device_enable_mgpu_fan_boost();
2637 	if (r)
2638 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2639 
2640 	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2641 	if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
2642 			       adev->asic_type == CHIP_ALDEBARAN ))
2643 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2644 
2645 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2646 		mutex_lock(&mgpu_info.mutex);
2647 
2648 		/*
2649 		 * Reset device p-state to low as this was booted with high.
2650 		 *
2651 		 * This should be performed only after all devices from the same
2652 		 * hive get initialized.
2653 		 *
2654 		 * However, it's unknown how many device in the hive in advance.
2655 		 * As this is counted one by one during devices initializations.
2656 		 *
2657 		 * So, we wait for all XGMI interlinked devices initialized.
2658 		 * This may bring some delays as those devices may come from
2659 		 * different hives. But that should be OK.
2660 		 */
2661 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2662 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2663 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2664 				if (gpu_instance->adev->flags & AMD_IS_APU)
2665 					continue;
2666 
2667 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2668 						AMDGPU_XGMI_PSTATE_MIN);
2669 				if (r) {
2670 					DRM_ERROR("pstate setting failed (%d).\n", r);
2671 					break;
2672 				}
2673 			}
2674 		}
2675 
2676 		mutex_unlock(&mgpu_info.mutex);
2677 	}
2678 
2679 	return 0;
2680 }
2681 
2682 /**
2683  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2684  *
2685  * @adev: amdgpu_device pointer
2686  *
2687  * For ASICs need to disable SMC first
2688  */
2689 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2690 {
2691 	int i, r;
2692 
2693 	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2694 		return;
2695 
2696 	for (i = 0; i < adev->num_ip_blocks; i++) {
2697 		if (!adev->ip_blocks[i].status.hw)
2698 			continue;
2699 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2700 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2701 			/* XXX handle errors */
2702 			if (r) {
2703 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2704 					  adev->ip_blocks[i].version->funcs->name, r);
2705 			}
2706 			adev->ip_blocks[i].status.hw = false;
2707 			break;
2708 		}
2709 	}
2710 }
2711 
2712 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2713 {
2714 	int i, r;
2715 
2716 	for (i = 0; i < adev->num_ip_blocks; i++) {
2717 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2718 			continue;
2719 
2720 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2721 		if (r) {
2722 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2723 				  adev->ip_blocks[i].version->funcs->name, r);
2724 		}
2725 	}
2726 
2727 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2728 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2729 
2730 	amdgpu_amdkfd_suspend(adev, false);
2731 
2732 	/* Workaroud for ASICs need to disable SMC first */
2733 	amdgpu_device_smu_fini_early(adev);
2734 
2735 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2736 		if (!adev->ip_blocks[i].status.hw)
2737 			continue;
2738 
2739 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2740 		/* XXX handle errors */
2741 		if (r) {
2742 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2743 				  adev->ip_blocks[i].version->funcs->name, r);
2744 		}
2745 
2746 		adev->ip_blocks[i].status.hw = false;
2747 	}
2748 
2749 	if (amdgpu_sriov_vf(adev)) {
2750 		if (amdgpu_virt_release_full_gpu(adev, false))
2751 			DRM_ERROR("failed to release exclusive mode on fini\n");
2752 	}
2753 
2754 	return 0;
2755 }
2756 
2757 /**
2758  * amdgpu_device_ip_fini - run fini for hardware IPs
2759  *
2760  * @adev: amdgpu_device pointer
2761  *
2762  * Main teardown pass for hardware IPs.  The list of all the hardware
2763  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2764  * are run.  hw_fini tears down the hardware associated with each IP
2765  * and sw_fini tears down any software state associated with each IP.
2766  * Returns 0 on success, negative error code on failure.
2767  */
2768 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2769 {
2770 	int i, r;
2771 
2772 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2773 		amdgpu_virt_release_ras_err_handler_data(adev);
2774 
2775 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2776 		amdgpu_xgmi_remove_device(adev);
2777 
2778 	amdgpu_amdkfd_device_fini_sw(adev);
2779 
2780 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2781 		if (!adev->ip_blocks[i].status.sw)
2782 			continue;
2783 
2784 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2785 			amdgpu_ucode_free_bo(adev);
2786 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2787 			amdgpu_device_wb_fini(adev);
2788 			amdgpu_device_vram_scratch_fini(adev);
2789 			amdgpu_ib_pool_fini(adev);
2790 		}
2791 
2792 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2793 		/* XXX handle errors */
2794 		if (r) {
2795 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2796 				  adev->ip_blocks[i].version->funcs->name, r);
2797 		}
2798 		adev->ip_blocks[i].status.sw = false;
2799 		adev->ip_blocks[i].status.valid = false;
2800 	}
2801 
2802 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2803 		if (!adev->ip_blocks[i].status.late_initialized)
2804 			continue;
2805 		if (adev->ip_blocks[i].version->funcs->late_fini)
2806 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2807 		adev->ip_blocks[i].status.late_initialized = false;
2808 	}
2809 
2810 	amdgpu_ras_fini(adev);
2811 
2812 	return 0;
2813 }
2814 
2815 /**
2816  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2817  *
2818  * @work: work_struct.
2819  */
2820 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2821 {
2822 	struct amdgpu_device *adev =
2823 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2824 	int r;
2825 
2826 	r = amdgpu_ib_ring_tests(adev);
2827 	if (r)
2828 		DRM_ERROR("ib ring test failed (%d).\n", r);
2829 }
2830 
2831 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2832 {
2833 	struct amdgpu_device *adev =
2834 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2835 
2836 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2837 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2838 
2839 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2840 		adev->gfx.gfx_off_state = true;
2841 }
2842 
2843 /**
2844  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2845  *
2846  * @adev: amdgpu_device pointer
2847  *
2848  * Main suspend function for hardware IPs.  The list of all the hardware
2849  * IPs that make up the asic is walked, clockgating is disabled and the
2850  * suspend callbacks are run.  suspend puts the hardware and software state
2851  * in each IP into a state suitable for suspend.
2852  * Returns 0 on success, negative error code on failure.
2853  */
2854 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2855 {
2856 	int i, r;
2857 
2858 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2859 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2860 
2861 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2862 		if (!adev->ip_blocks[i].status.valid)
2863 			continue;
2864 
2865 		/* displays are handled separately */
2866 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2867 			continue;
2868 
2869 		/* XXX handle errors */
2870 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2871 		/* XXX handle errors */
2872 		if (r) {
2873 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2874 				  adev->ip_blocks[i].version->funcs->name, r);
2875 			return r;
2876 		}
2877 
2878 		adev->ip_blocks[i].status.hw = false;
2879 	}
2880 
2881 	return 0;
2882 }
2883 
2884 /**
2885  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2886  *
2887  * @adev: amdgpu_device pointer
2888  *
2889  * Main suspend function for hardware IPs.  The list of all the hardware
2890  * IPs that make up the asic is walked, clockgating is disabled and the
2891  * suspend callbacks are run.  suspend puts the hardware and software state
2892  * in each IP into a state suitable for suspend.
2893  * Returns 0 on success, negative error code on failure.
2894  */
2895 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2896 {
2897 	int i, r;
2898 
2899 	if (adev->in_s0ix)
2900 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2901 
2902 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2903 		if (!adev->ip_blocks[i].status.valid)
2904 			continue;
2905 		/* displays are handled in phase1 */
2906 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2907 			continue;
2908 		/* PSP lost connection when err_event_athub occurs */
2909 		if (amdgpu_ras_intr_triggered() &&
2910 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2911 			adev->ip_blocks[i].status.hw = false;
2912 			continue;
2913 		}
2914 
2915 		/* skip unnecessary suspend if we do not initialize them yet */
2916 		if (adev->gmc.xgmi.pending_reset &&
2917 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2918 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2919 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2920 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2921 			adev->ip_blocks[i].status.hw = false;
2922 			continue;
2923 		}
2924 
2925 		/* skip suspend of gfx and psp for S0ix
2926 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2927 		 * like at runtime. PSP is also part of the always on hardware
2928 		 * so no need to suspend it.
2929 		 */
2930 		if (adev->in_s0ix &&
2931 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2932 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2933 			continue;
2934 
2935 		/* XXX handle errors */
2936 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2937 		/* XXX handle errors */
2938 		if (r) {
2939 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2940 				  adev->ip_blocks[i].version->funcs->name, r);
2941 		}
2942 		adev->ip_blocks[i].status.hw = false;
2943 		/* handle putting the SMC in the appropriate state */
2944 		if(!amdgpu_sriov_vf(adev)){
2945 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2946 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2947 				if (r) {
2948 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2949 							adev->mp1_state, r);
2950 					return r;
2951 				}
2952 			}
2953 		}
2954 	}
2955 
2956 	return 0;
2957 }
2958 
2959 /**
2960  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2961  *
2962  * @adev: amdgpu_device pointer
2963  *
2964  * Main suspend function for hardware IPs.  The list of all the hardware
2965  * IPs that make up the asic is walked, clockgating is disabled and the
2966  * suspend callbacks are run.  suspend puts the hardware and software state
2967  * in each IP into a state suitable for suspend.
2968  * Returns 0 on success, negative error code on failure.
2969  */
2970 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2971 {
2972 	int r;
2973 
2974 	if (amdgpu_sriov_vf(adev)) {
2975 		amdgpu_virt_fini_data_exchange(adev);
2976 		amdgpu_virt_request_full_gpu(adev, false);
2977 	}
2978 
2979 	r = amdgpu_device_ip_suspend_phase1(adev);
2980 	if (r)
2981 		return r;
2982 	r = amdgpu_device_ip_suspend_phase2(adev);
2983 
2984 	if (amdgpu_sriov_vf(adev))
2985 		amdgpu_virt_release_full_gpu(adev, false);
2986 
2987 	return r;
2988 }
2989 
2990 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2991 {
2992 	int i, r;
2993 
2994 	static enum amd_ip_block_type ip_order[] = {
2995 		AMD_IP_BLOCK_TYPE_GMC,
2996 		AMD_IP_BLOCK_TYPE_COMMON,
2997 		AMD_IP_BLOCK_TYPE_PSP,
2998 		AMD_IP_BLOCK_TYPE_IH,
2999 	};
3000 
3001 	for (i = 0; i < adev->num_ip_blocks; i++) {
3002 		int j;
3003 		struct amdgpu_ip_block *block;
3004 
3005 		block = &adev->ip_blocks[i];
3006 		block->status.hw = false;
3007 
3008 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3009 
3010 			if (block->version->type != ip_order[j] ||
3011 				!block->status.valid)
3012 				continue;
3013 
3014 			r = block->version->funcs->hw_init(adev);
3015 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3016 			if (r)
3017 				return r;
3018 			block->status.hw = true;
3019 		}
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3026 {
3027 	int i, r;
3028 
3029 	static enum amd_ip_block_type ip_order[] = {
3030 		AMD_IP_BLOCK_TYPE_SMC,
3031 		AMD_IP_BLOCK_TYPE_DCE,
3032 		AMD_IP_BLOCK_TYPE_GFX,
3033 		AMD_IP_BLOCK_TYPE_SDMA,
3034 		AMD_IP_BLOCK_TYPE_UVD,
3035 		AMD_IP_BLOCK_TYPE_VCE,
3036 		AMD_IP_BLOCK_TYPE_VCN
3037 	};
3038 
3039 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3040 		int j;
3041 		struct amdgpu_ip_block *block;
3042 
3043 		for (j = 0; j < adev->num_ip_blocks; j++) {
3044 			block = &adev->ip_blocks[j];
3045 
3046 			if (block->version->type != ip_order[i] ||
3047 				!block->status.valid ||
3048 				block->status.hw)
3049 				continue;
3050 
3051 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3052 				r = block->version->funcs->resume(adev);
3053 			else
3054 				r = block->version->funcs->hw_init(adev);
3055 
3056 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3057 			if (r)
3058 				return r;
3059 			block->status.hw = true;
3060 		}
3061 	}
3062 
3063 	return 0;
3064 }
3065 
3066 /**
3067  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3068  *
3069  * @adev: amdgpu_device pointer
3070  *
3071  * First resume function for hardware IPs.  The list of all the hardware
3072  * IPs that make up the asic is walked and the resume callbacks are run for
3073  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3074  * after a suspend and updates the software state as necessary.  This
3075  * function is also used for restoring the GPU after a GPU reset.
3076  * Returns 0 on success, negative error code on failure.
3077  */
3078 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3079 {
3080 	int i, r;
3081 
3082 	for (i = 0; i < adev->num_ip_blocks; i++) {
3083 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3084 			continue;
3085 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3086 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3087 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3088 
3089 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3090 			if (r) {
3091 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3092 					  adev->ip_blocks[i].version->funcs->name, r);
3093 				return r;
3094 			}
3095 			adev->ip_blocks[i].status.hw = true;
3096 		}
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 /**
3103  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3104  *
3105  * @adev: amdgpu_device pointer
3106  *
3107  * First resume function for hardware IPs.  The list of all the hardware
3108  * IPs that make up the asic is walked and the resume callbacks are run for
3109  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3110  * functional state after a suspend and updates the software state as
3111  * necessary.  This function is also used for restoring the GPU after a GPU
3112  * reset.
3113  * Returns 0 on success, negative error code on failure.
3114  */
3115 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3116 {
3117 	int i, r;
3118 
3119 	for (i = 0; i < adev->num_ip_blocks; i++) {
3120 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3121 			continue;
3122 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3123 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3124 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3125 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3126 			continue;
3127 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3128 		if (r) {
3129 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3130 				  adev->ip_blocks[i].version->funcs->name, r);
3131 			return r;
3132 		}
3133 		adev->ip_blocks[i].status.hw = true;
3134 	}
3135 
3136 	return 0;
3137 }
3138 
3139 /**
3140  * amdgpu_device_ip_resume - run resume for hardware IPs
3141  *
3142  * @adev: amdgpu_device pointer
3143  *
3144  * Main resume function for hardware IPs.  The hardware IPs
3145  * are split into two resume functions because they are
3146  * are also used in in recovering from a GPU reset and some additional
3147  * steps need to be take between them.  In this case (S3/S4) they are
3148  * run sequentially.
3149  * Returns 0 on success, negative error code on failure.
3150  */
3151 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3152 {
3153 	int r;
3154 
3155 	r = amdgpu_amdkfd_resume_iommu(adev);
3156 	if (r)
3157 		return r;
3158 
3159 	r = amdgpu_device_ip_resume_phase1(adev);
3160 	if (r)
3161 		return r;
3162 
3163 	r = amdgpu_device_fw_loading(adev);
3164 	if (r)
3165 		return r;
3166 
3167 	r = amdgpu_device_ip_resume_phase2(adev);
3168 
3169 	return r;
3170 }
3171 
3172 /**
3173  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3174  *
3175  * @adev: amdgpu_device pointer
3176  *
3177  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3178  */
3179 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3180 {
3181 	if (amdgpu_sriov_vf(adev)) {
3182 		if (adev->is_atom_fw) {
3183 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3184 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3185 		} else {
3186 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3187 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3188 		}
3189 
3190 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3191 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3192 	}
3193 }
3194 
3195 /**
3196  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3197  *
3198  * @asic_type: AMD asic type
3199  *
3200  * Check if there is DC (new modesetting infrastructre) support for an asic.
3201  * returns true if DC has support, false if not.
3202  */
3203 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3204 {
3205 	switch (asic_type) {
3206 #ifdef CONFIG_DRM_AMDGPU_SI
3207 	case CHIP_HAINAN:
3208 #endif
3209 	case CHIP_TOPAZ:
3210 		/* chips with no display hardware */
3211 		return false;
3212 #if defined(CONFIG_DRM_AMD_DC)
3213 	case CHIP_TAHITI:
3214 	case CHIP_PITCAIRN:
3215 	case CHIP_VERDE:
3216 	case CHIP_OLAND:
3217 		/*
3218 		 * We have systems in the wild with these ASICs that require
3219 		 * LVDS and VGA support which is not supported with DC.
3220 		 *
3221 		 * Fallback to the non-DC driver here by default so as not to
3222 		 * cause regressions.
3223 		 */
3224 #if defined(CONFIG_DRM_AMD_DC_SI)
3225 		return amdgpu_dc > 0;
3226 #else
3227 		return false;
3228 #endif
3229 	case CHIP_BONAIRE:
3230 	case CHIP_KAVERI:
3231 	case CHIP_KABINI:
3232 	case CHIP_MULLINS:
3233 		/*
3234 		 * We have systems in the wild with these ASICs that require
3235 		 * LVDS and VGA support which is not supported with DC.
3236 		 *
3237 		 * Fallback to the non-DC driver here by default so as not to
3238 		 * cause regressions.
3239 		 */
3240 		return amdgpu_dc > 0;
3241 	case CHIP_HAWAII:
3242 	case CHIP_CARRIZO:
3243 	case CHIP_STONEY:
3244 	case CHIP_POLARIS10:
3245 	case CHIP_POLARIS11:
3246 	case CHIP_POLARIS12:
3247 	case CHIP_VEGAM:
3248 	case CHIP_TONGA:
3249 	case CHIP_FIJI:
3250 	case CHIP_VEGA10:
3251 	case CHIP_VEGA12:
3252 	case CHIP_VEGA20:
3253 #if defined(CONFIG_DRM_AMD_DC_DCN)
3254 	case CHIP_RAVEN:
3255 	case CHIP_NAVI10:
3256 	case CHIP_NAVI14:
3257 	case CHIP_NAVI12:
3258 	case CHIP_RENOIR:
3259 	case CHIP_CYAN_SKILLFISH:
3260 	case CHIP_SIENNA_CICHLID:
3261 	case CHIP_NAVY_FLOUNDER:
3262 	case CHIP_DIMGREY_CAVEFISH:
3263 	case CHIP_BEIGE_GOBY:
3264 	case CHIP_VANGOGH:
3265 	case CHIP_YELLOW_CARP:
3266 #endif
3267 	default:
3268 		return amdgpu_dc != 0;
3269 #else
3270 	default:
3271 		if (amdgpu_dc > 0)
3272 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3273 					 "but isn't supported by ASIC, ignoring\n");
3274 		return false;
3275 #endif
3276 	}
3277 }
3278 
3279 /**
3280  * amdgpu_device_has_dc_support - check if dc is supported
3281  *
3282  * @adev: amdgpu_device pointer
3283  *
3284  * Returns true for supported, false for not supported
3285  */
3286 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3287 {
3288 	if (amdgpu_sriov_vf(adev) ||
3289 	    adev->enable_virtual_display ||
3290 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3291 		return false;
3292 
3293 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3294 }
3295 
3296 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3297 {
3298 	struct amdgpu_device *adev =
3299 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3300 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3301 
3302 	/* It's a bug to not have a hive within this function */
3303 	if (WARN_ON(!hive))
3304 		return;
3305 
3306 	/*
3307 	 * Use task barrier to synchronize all xgmi reset works across the
3308 	 * hive. task_barrier_enter and task_barrier_exit will block
3309 	 * until all the threads running the xgmi reset works reach
3310 	 * those points. task_barrier_full will do both blocks.
3311 	 */
3312 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3313 
3314 		task_barrier_enter(&hive->tb);
3315 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3316 
3317 		if (adev->asic_reset_res)
3318 			goto fail;
3319 
3320 		task_barrier_exit(&hive->tb);
3321 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3322 
3323 		if (adev->asic_reset_res)
3324 			goto fail;
3325 
3326 		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3327 		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3328 			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3329 	} else {
3330 
3331 		task_barrier_full(&hive->tb);
3332 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3333 	}
3334 
3335 fail:
3336 	if (adev->asic_reset_res)
3337 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3338 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3339 	amdgpu_put_xgmi_hive(hive);
3340 }
3341 
3342 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3343 {
3344 	char *input = amdgpu_lockup_timeout;
3345 	char *timeout_setting = NULL;
3346 	int index = 0;
3347 	long timeout;
3348 	int ret = 0;
3349 
3350 	/*
3351 	 * By default timeout for non compute jobs is 10000
3352 	 * and 60000 for compute jobs.
3353 	 * In SR-IOV or passthrough mode, timeout for compute
3354 	 * jobs are 60000 by default.
3355 	 */
3356 	adev->gfx_timeout = msecs_to_jiffies(10000);
3357 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3358 	if (amdgpu_sriov_vf(adev))
3359 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3360 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3361 	else
3362 		adev->compute_timeout =  msecs_to_jiffies(60000);
3363 
3364 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3365 		while ((timeout_setting = strsep(&input, ",")) &&
3366 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3367 			ret = kstrtol(timeout_setting, 0, &timeout);
3368 			if (ret)
3369 				return ret;
3370 
3371 			if (timeout == 0) {
3372 				index++;
3373 				continue;
3374 			} else if (timeout < 0) {
3375 				timeout = MAX_SCHEDULE_TIMEOUT;
3376 				dev_warn(adev->dev, "lockup timeout disabled");
3377 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3378 			} else {
3379 				timeout = msecs_to_jiffies(timeout);
3380 			}
3381 
3382 			switch (index++) {
3383 			case 0:
3384 				adev->gfx_timeout = timeout;
3385 				break;
3386 			case 1:
3387 				adev->compute_timeout = timeout;
3388 				break;
3389 			case 2:
3390 				adev->sdma_timeout = timeout;
3391 				break;
3392 			case 3:
3393 				adev->video_timeout = timeout;
3394 				break;
3395 			default:
3396 				break;
3397 			}
3398 		}
3399 		/*
3400 		 * There is only one value specified and
3401 		 * it should apply to all non-compute jobs.
3402 		 */
3403 		if (index == 1) {
3404 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3405 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3406 				adev->compute_timeout = adev->gfx_timeout;
3407 		}
3408 	}
3409 
3410 	return ret;
3411 }
3412 
3413 /**
3414  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3415  *
3416  * @adev: amdgpu_device pointer
3417  *
3418  * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode
3419  */
3420 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3421 {
3422 	struct iommu_domain *domain;
3423 
3424 	domain = iommu_get_domain_for_dev(adev->dev);
3425 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3426 		adev->ram_is_direct_mapped = true;
3427 }
3428 
3429 static const struct attribute *amdgpu_dev_attributes[] = {
3430 	&dev_attr_product_name.attr,
3431 	&dev_attr_product_number.attr,
3432 	&dev_attr_serial_number.attr,
3433 	&dev_attr_pcie_replay_count.attr,
3434 	NULL
3435 };
3436 
3437 /**
3438  * amdgpu_device_init - initialize the driver
3439  *
3440  * @adev: amdgpu_device pointer
3441  * @flags: driver flags
3442  *
3443  * Initializes the driver info and hw (all asics).
3444  * Returns 0 for success or an error on failure.
3445  * Called at driver startup.
3446  */
3447 int amdgpu_device_init(struct amdgpu_device *adev,
3448 		       uint32_t flags)
3449 {
3450 	struct drm_device *ddev = adev_to_drm(adev);
3451 	struct pci_dev *pdev = adev->pdev;
3452 	int r, i;
3453 	bool px = false;
3454 	u32 max_MBps;
3455 
3456 	adev->shutdown = false;
3457 	adev->flags = flags;
3458 
3459 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3460 		adev->asic_type = amdgpu_force_asic_type;
3461 	else
3462 		adev->asic_type = flags & AMD_ASIC_MASK;
3463 
3464 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3465 	if (amdgpu_emu_mode == 1)
3466 		adev->usec_timeout *= 10;
3467 	adev->gmc.gart_size = 512 * 1024 * 1024;
3468 	adev->accel_working = false;
3469 	adev->num_rings = 0;
3470 	adev->mman.buffer_funcs = NULL;
3471 	adev->mman.buffer_funcs_ring = NULL;
3472 	adev->vm_manager.vm_pte_funcs = NULL;
3473 	adev->vm_manager.vm_pte_num_scheds = 0;
3474 	adev->gmc.gmc_funcs = NULL;
3475 	adev->harvest_ip_mask = 0x0;
3476 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3477 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3478 
3479 	adev->smc_rreg = &amdgpu_invalid_rreg;
3480 	adev->smc_wreg = &amdgpu_invalid_wreg;
3481 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3482 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3483 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3484 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3485 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3486 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3487 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3488 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3489 	adev->didt_rreg = &amdgpu_invalid_rreg;
3490 	adev->didt_wreg = &amdgpu_invalid_wreg;
3491 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3492 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3493 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3494 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3495 
3496 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3497 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3498 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3499 
3500 	/* mutex initialization are all done here so we
3501 	 * can recall function without having locking issues */
3502 	mutex_init(&adev->firmware.mutex);
3503 	mutex_init(&adev->pm.mutex);
3504 	mutex_init(&adev->gfx.gpu_clock_mutex);
3505 	mutex_init(&adev->srbm_mutex);
3506 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3507 	mutex_init(&adev->gfx.gfx_off_mutex);
3508 	mutex_init(&adev->grbm_idx_mutex);
3509 	mutex_init(&adev->mn_lock);
3510 	mutex_init(&adev->virt.vf_errors.lock);
3511 	hash_init(adev->mn_hash);
3512 	atomic_set(&adev->in_gpu_reset, 0);
3513 	init_rwsem(&adev->reset_sem);
3514 	mutex_init(&adev->psp.mutex);
3515 	mutex_init(&adev->notifier_lock);
3516 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3517 
3518 	amdgpu_device_init_apu_flags(adev);
3519 
3520 	r = amdgpu_device_check_arguments(adev);
3521 	if (r)
3522 		return r;
3523 
3524 	spin_lock_init(&adev->mmio_idx_lock);
3525 	spin_lock_init(&adev->smc_idx_lock);
3526 	spin_lock_init(&adev->pcie_idx_lock);
3527 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3528 	spin_lock_init(&adev->didt_idx_lock);
3529 	spin_lock_init(&adev->gc_cac_idx_lock);
3530 	spin_lock_init(&adev->se_cac_idx_lock);
3531 	spin_lock_init(&adev->audio_endpt_idx_lock);
3532 	spin_lock_init(&adev->mm_stats.lock);
3533 
3534 	INIT_LIST_HEAD(&adev->shadow_list);
3535 	mutex_init(&adev->shadow_list_lock);
3536 
3537 	INIT_LIST_HEAD(&adev->reset_list);
3538 
3539 	INIT_LIST_HEAD(&adev->ras_list);
3540 
3541 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3542 			  amdgpu_device_delayed_init_work_handler);
3543 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3544 			  amdgpu_device_delay_enable_gfx_off);
3545 
3546 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3547 
3548 	adev->gfx.gfx_off_req_count = 1;
3549 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3550 
3551 	atomic_set(&adev->throttling_logging_enabled, 1);
3552 	/*
3553 	 * If throttling continues, logging will be performed every minute
3554 	 * to avoid log flooding. "-1" is subtracted since the thermal
3555 	 * throttling interrupt comes every second. Thus, the total logging
3556 	 * interval is 59 seconds(retelimited printk interval) + 1(waiting
3557 	 * for throttling interrupt) = 60 seconds.
3558 	 */
3559 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3560 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3561 
3562 	/* Registers mapping */
3563 	/* TODO: block userspace mapping of io register */
3564 	if (adev->asic_type >= CHIP_BONAIRE) {
3565 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3566 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3567 	} else {
3568 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3569 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3570 	}
3571 
3572 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3573 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3574 
3575 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3576 	if (adev->rmmio == NULL) {
3577 		return -ENOMEM;
3578 	}
3579 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3580 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3581 
3582 	amdgpu_device_get_pcie_info(adev);
3583 
3584 	if (amdgpu_mcbp)
3585 		DRM_INFO("MCBP is enabled\n");
3586 
3587 	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3588 		adev->enable_mes = true;
3589 
3590 	/* detect hw virtualization here */
3591 	amdgpu_detect_virtualization(adev);
3592 
3593 	r = amdgpu_device_get_job_timeout_settings(adev);
3594 	if (r) {
3595 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3596 		return r;
3597 	}
3598 
3599 	/* early init functions */
3600 	r = amdgpu_device_ip_early_init(adev);
3601 	if (r)
3602 		return r;
3603 
3604 	/* Need to get xgmi info early to decide the reset behavior*/
3605 	if (adev->gmc.xgmi.supported) {
3606 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
3607 		if (r)
3608 			return r;
3609 	}
3610 
3611 	/* enable PCIE atomic ops */
3612 	if (amdgpu_sriov_vf(adev))
3613 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3614 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3615 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3616 	else
3617 		adev->have_atomics_support =
3618 			!pci_enable_atomic_ops_to_root(adev->pdev,
3619 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3620 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3621 	if (!adev->have_atomics_support)
3622 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3623 
3624 	/* doorbell bar mapping and doorbell index init*/
3625 	amdgpu_device_doorbell_init(adev);
3626 
3627 	if (amdgpu_emu_mode == 1) {
3628 		/* post the asic on emulation mode */
3629 		emu_soc_asic_init(adev);
3630 		goto fence_driver_init;
3631 	}
3632 
3633 	amdgpu_reset_init(adev);
3634 
3635 	/* detect if we are with an SRIOV vbios */
3636 	amdgpu_device_detect_sriov_bios(adev);
3637 
3638 	/* check if we need to reset the asic
3639 	 *  E.g., driver was not cleanly unloaded previously, etc.
3640 	 */
3641 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3642 		if (adev->gmc.xgmi.num_physical_nodes) {
3643 			dev_info(adev->dev, "Pending hive reset.\n");
3644 			adev->gmc.xgmi.pending_reset = true;
3645 			/* Only need to init necessary block for SMU to handle the reset */
3646 			for (i = 0; i < adev->num_ip_blocks; i++) {
3647 				if (!adev->ip_blocks[i].status.valid)
3648 					continue;
3649 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3650 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3651 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3652 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3653 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3654 						adev->ip_blocks[i].version->funcs->name);
3655 					adev->ip_blocks[i].status.hw = true;
3656 				}
3657 			}
3658 		} else {
3659 			r = amdgpu_asic_reset(adev);
3660 			if (r) {
3661 				dev_err(adev->dev, "asic reset on init failed\n");
3662 				goto failed;
3663 			}
3664 		}
3665 	}
3666 
3667 	pci_enable_pcie_error_reporting(adev->pdev);
3668 
3669 	/* Post card if necessary */
3670 	if (amdgpu_device_need_post(adev)) {
3671 		if (!adev->bios) {
3672 			dev_err(adev->dev, "no vBIOS found\n");
3673 			r = -EINVAL;
3674 			goto failed;
3675 		}
3676 		DRM_INFO("GPU posting now...\n");
3677 		r = amdgpu_device_asic_init(adev);
3678 		if (r) {
3679 			dev_err(adev->dev, "gpu post error!\n");
3680 			goto failed;
3681 		}
3682 	}
3683 
3684 	if (adev->is_atom_fw) {
3685 		/* Initialize clocks */
3686 		r = amdgpu_atomfirmware_get_clock_info(adev);
3687 		if (r) {
3688 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3689 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3690 			goto failed;
3691 		}
3692 	} else {
3693 		/* Initialize clocks */
3694 		r = amdgpu_atombios_get_clock_info(adev);
3695 		if (r) {
3696 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3697 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3698 			goto failed;
3699 		}
3700 		/* init i2c buses */
3701 		if (!amdgpu_device_has_dc_support(adev))
3702 			amdgpu_atombios_i2c_init(adev);
3703 	}
3704 
3705 fence_driver_init:
3706 	/* Fence driver */
3707 	r = amdgpu_fence_driver_sw_init(adev);
3708 	if (r) {
3709 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3710 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3711 		goto failed;
3712 	}
3713 
3714 	/* init the mode config */
3715 	drm_mode_config_init(adev_to_drm(adev));
3716 
3717 	r = amdgpu_device_ip_init(adev);
3718 	if (r) {
3719 		/* failed in exclusive mode due to timeout */
3720 		if (amdgpu_sriov_vf(adev) &&
3721 		    !amdgpu_sriov_runtime(adev) &&
3722 		    amdgpu_virt_mmio_blocked(adev) &&
3723 		    !amdgpu_virt_wait_reset(adev)) {
3724 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3725 			/* Don't send request since VF is inactive. */
3726 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3727 			adev->virt.ops = NULL;
3728 			r = -EAGAIN;
3729 			goto release_ras_con;
3730 		}
3731 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3732 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3733 		goto release_ras_con;
3734 	}
3735 
3736 	amdgpu_fence_driver_hw_init(adev);
3737 
3738 	dev_info(adev->dev,
3739 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3740 			adev->gfx.config.max_shader_engines,
3741 			adev->gfx.config.max_sh_per_se,
3742 			adev->gfx.config.max_cu_per_sh,
3743 			adev->gfx.cu_info.number);
3744 
3745 	adev->accel_working = true;
3746 
3747 	amdgpu_vm_check_compute_bug(adev);
3748 
3749 	/* Initialize the buffer migration limit. */
3750 	if (amdgpu_moverate >= 0)
3751 		max_MBps = amdgpu_moverate;
3752 	else
3753 		max_MBps = 8; /* Allow 8 MB/s. */
3754 	/* Get a log2 for easy divisions. */
3755 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3756 
3757 	r = amdgpu_pm_sysfs_init(adev);
3758 	if (r) {
3759 		adev->pm_sysfs_en = false;
3760 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
3761 	} else
3762 		adev->pm_sysfs_en = true;
3763 
3764 	r = amdgpu_ucode_sysfs_init(adev);
3765 	if (r) {
3766 		adev->ucode_sysfs_en = false;
3767 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3768 	} else
3769 		adev->ucode_sysfs_en = true;
3770 
3771 	if ((amdgpu_testing & 1)) {
3772 		if (adev->accel_working)
3773 			amdgpu_test_moves(adev);
3774 		else
3775 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3776 	}
3777 	if (amdgpu_benchmarking) {
3778 		if (adev->accel_working)
3779 			amdgpu_benchmark(adev, amdgpu_benchmarking);
3780 		else
3781 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3782 	}
3783 
3784 	/*
3785 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3786 	 * Otherwise the mgpu fan boost feature will be skipped due to the
3787 	 * gpu instance is counted less.
3788 	 */
3789 	amdgpu_register_gpu_instance(adev);
3790 
3791 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3792 	 * explicit gating rather than handling it automatically.
3793 	 */
3794 	if (!adev->gmc.xgmi.pending_reset) {
3795 		r = amdgpu_device_ip_late_init(adev);
3796 		if (r) {
3797 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3798 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3799 			goto release_ras_con;
3800 		}
3801 		/* must succeed. */
3802 		amdgpu_ras_resume(adev);
3803 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3804 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3805 	}
3806 
3807 	if (amdgpu_sriov_vf(adev))
3808 		flush_delayed_work(&adev->delayed_init_work);
3809 
3810 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3811 	if (r)
3812 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3813 
3814 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3815 		r = amdgpu_pmu_init(adev);
3816 	if (r)
3817 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3818 
3819 	/* Have stored pci confspace at hand for restore in sudden PCI error */
3820 	if (amdgpu_device_cache_pci_state(adev->pdev))
3821 		pci_restore_state(pdev);
3822 
3823 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3824 	/* this will fail for cards that aren't VGA class devices, just
3825 	 * ignore it */
3826 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3827 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3828 
3829 	if (amdgpu_device_supports_px(ddev)) {
3830 		px = true;
3831 		vga_switcheroo_register_client(adev->pdev,
3832 					       &amdgpu_switcheroo_ops, px);
3833 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3834 	}
3835 
3836 	if (adev->gmc.xgmi.pending_reset)
3837 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3838 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3839 
3840 	amdgpu_device_check_iommu_direct_map(adev);
3841 
3842 	return 0;
3843 
3844 release_ras_con:
3845 	amdgpu_release_ras_context(adev);
3846 
3847 failed:
3848 	amdgpu_vf_error_trans_all(adev);
3849 
3850 	return r;
3851 }
3852 
3853 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3854 {
3855 
3856 	/* Clear all CPU mappings pointing to this device */
3857 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3858 
3859 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3860 	amdgpu_device_doorbell_fini(adev);
3861 
3862 	iounmap(adev->rmmio);
3863 	adev->rmmio = NULL;
3864 	if (adev->mman.aper_base_kaddr)
3865 		iounmap(adev->mman.aper_base_kaddr);
3866 	adev->mman.aper_base_kaddr = NULL;
3867 
3868 	/* Memory manager related */
3869 	if (!adev->gmc.xgmi.connected_to_cpu) {
3870 		arch_phys_wc_del(adev->gmc.vram_mtrr);
3871 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3872 	}
3873 }
3874 
3875 /**
3876  * amdgpu_device_fini_hw - tear down the driver
3877  *
3878  * @adev: amdgpu_device pointer
3879  *
3880  * Tear down the driver info (all asics).
3881  * Called at driver shutdown.
3882  */
3883 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3884 {
3885 	dev_info(adev->dev, "amdgpu: finishing device.\n");
3886 	flush_delayed_work(&adev->delayed_init_work);
3887 	if (adev->mman.initialized) {
3888 		flush_delayed_work(&adev->mman.bdev.wq);
3889 		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3890 	}
3891 	adev->shutdown = true;
3892 
3893 	/* make sure IB test finished before entering exclusive mode
3894 	 * to avoid preemption on IB test
3895 	 * */
3896 	if (amdgpu_sriov_vf(adev)) {
3897 		amdgpu_virt_request_full_gpu(adev, false);
3898 		amdgpu_virt_fini_data_exchange(adev);
3899 	}
3900 
3901 	/* disable all interrupts */
3902 	amdgpu_irq_disable_all(adev);
3903 	if (adev->mode_info.mode_config_initialized){
3904 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3905 			drm_helper_force_disable_all(adev_to_drm(adev));
3906 		else
3907 			drm_atomic_helper_shutdown(adev_to_drm(adev));
3908 	}
3909 	amdgpu_fence_driver_hw_fini(adev);
3910 
3911 	if (adev->pm_sysfs_en)
3912 		amdgpu_pm_sysfs_fini(adev);
3913 	if (adev->ucode_sysfs_en)
3914 		amdgpu_ucode_sysfs_fini(adev);
3915 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3916 
3917 	/* disable ras feature must before hw fini */
3918 	amdgpu_ras_pre_fini(adev);
3919 
3920 	amdgpu_device_ip_fini_early(adev);
3921 
3922 	amdgpu_irq_fini_hw(adev);
3923 
3924 	if (adev->mman.initialized)
3925 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
3926 
3927 	amdgpu_gart_dummy_page_fini(adev);
3928 
3929 	if (drm_dev_is_unplugged(adev_to_drm(adev)))
3930 		amdgpu_device_unmap_mmio(adev);
3931 
3932 }
3933 
3934 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3935 {
3936 	int idx;
3937 
3938 	amdgpu_fence_driver_sw_fini(adev);
3939 	amdgpu_device_ip_fini(adev);
3940 	release_firmware(adev->firmware.gpu_info_fw);
3941 	adev->firmware.gpu_info_fw = NULL;
3942 	adev->accel_working = false;
3943 
3944 	amdgpu_reset_fini(adev);
3945 
3946 	/* free i2c buses */
3947 	if (!amdgpu_device_has_dc_support(adev))
3948 		amdgpu_i2c_fini(adev);
3949 
3950 	if (amdgpu_emu_mode != 1)
3951 		amdgpu_atombios_fini(adev);
3952 
3953 	kfree(adev->bios);
3954 	adev->bios = NULL;
3955 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3956 		vga_switcheroo_unregister_client(adev->pdev);
3957 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3958 	}
3959 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3960 		vga_client_unregister(adev->pdev);
3961 
3962 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
3963 
3964 		iounmap(adev->rmmio);
3965 		adev->rmmio = NULL;
3966 		amdgpu_device_doorbell_fini(adev);
3967 		drm_dev_exit(idx);
3968 	}
3969 
3970 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3971 		amdgpu_pmu_fini(adev);
3972 	if (adev->mman.discovery_bin)
3973 		amdgpu_discovery_fini(adev);
3974 
3975 	kfree(adev->pci_state);
3976 
3977 }
3978 
3979 /**
3980  * amdgpu_device_evict_resources - evict device resources
3981  * @adev: amdgpu device object
3982  *
3983  * Evicts all ttm device resources(vram BOs, gart table) from the lru list
3984  * of the vram memory type. Mainly used for evicting device resources
3985  * at suspend time.
3986  *
3987  */
3988 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
3989 {
3990 	/* No need to evict vram on APUs for suspend to ram or s2idle */
3991 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
3992 		return;
3993 
3994 	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
3995 		DRM_WARN("evicting device resources failed\n");
3996 
3997 }
3998 
3999 /*
4000  * Suspend & resume.
4001  */
4002 /**
4003  * amdgpu_device_suspend - initiate device suspend
4004  *
4005  * @dev: drm dev pointer
4006  * @fbcon : notify the fbdev of suspend
4007  *
4008  * Puts the hw in the suspend state (all asics).
4009  * Returns 0 for success or an error on failure.
4010  * Called at driver suspend.
4011  */
4012 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4013 {
4014 	struct amdgpu_device *adev = drm_to_adev(dev);
4015 
4016 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4017 		return 0;
4018 
4019 	adev->in_suspend = true;
4020 
4021 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4022 		DRM_WARN("smart shift update failed\n");
4023 
4024 	drm_kms_helper_poll_disable(dev);
4025 
4026 	if (fbcon)
4027 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4028 
4029 	cancel_delayed_work_sync(&adev->delayed_init_work);
4030 
4031 	amdgpu_ras_suspend(adev);
4032 
4033 	amdgpu_device_ip_suspend_phase1(adev);
4034 
4035 	if (!adev->in_s0ix)
4036 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4037 
4038 	amdgpu_device_evict_resources(adev);
4039 
4040 	amdgpu_fence_driver_hw_fini(adev);
4041 
4042 	amdgpu_device_ip_suspend_phase2(adev);
4043 
4044 	return 0;
4045 }
4046 
4047 /**
4048  * amdgpu_device_resume - initiate device resume
4049  *
4050  * @dev: drm dev pointer
4051  * @fbcon : notify the fbdev of resume
4052  *
4053  * Bring the hw back to operating state (all asics).
4054  * Returns 0 for success or an error on failure.
4055  * Called at driver resume.
4056  */
4057 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4058 {
4059 	struct amdgpu_device *adev = drm_to_adev(dev);
4060 	int r = 0;
4061 
4062 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4063 		return 0;
4064 
4065 	if (adev->in_s0ix)
4066 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4067 
4068 	/* post card */
4069 	if (amdgpu_device_need_post(adev)) {
4070 		r = amdgpu_device_asic_init(adev);
4071 		if (r)
4072 			dev_err(adev->dev, "amdgpu asic init failed\n");
4073 	}
4074 
4075 	r = amdgpu_device_ip_resume(adev);
4076 	if (r) {
4077 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4078 		return r;
4079 	}
4080 	amdgpu_fence_driver_hw_init(adev);
4081 
4082 	r = amdgpu_device_ip_late_init(adev);
4083 	if (r)
4084 		return r;
4085 
4086 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4087 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4088 
4089 	if (!adev->in_s0ix) {
4090 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4091 		if (r)
4092 			return r;
4093 	}
4094 
4095 	/* Make sure IB tests flushed */
4096 	flush_delayed_work(&adev->delayed_init_work);
4097 
4098 	if (fbcon)
4099 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4100 
4101 	drm_kms_helper_poll_enable(dev);
4102 
4103 	amdgpu_ras_resume(adev);
4104 
4105 	/*
4106 	 * Most of the connector probing functions try to acquire runtime pm
4107 	 * refs to ensure that the GPU is powered on when connector polling is
4108 	 * performed. Since we're calling this from a runtime PM callback,
4109 	 * trying to acquire rpm refs will cause us to deadlock.
4110 	 *
4111 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
4112 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
4113 	 */
4114 #ifdef CONFIG_PM
4115 	dev->dev->power.disable_depth++;
4116 #endif
4117 	if (!amdgpu_device_has_dc_support(adev))
4118 		drm_helper_hpd_irq_event(dev);
4119 	else
4120 		drm_kms_helper_hotplug_event(dev);
4121 #ifdef CONFIG_PM
4122 	dev->dev->power.disable_depth--;
4123 #endif
4124 	adev->in_suspend = false;
4125 
4126 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4127 		DRM_WARN("smart shift update failed\n");
4128 
4129 	return 0;
4130 }
4131 
4132 /**
4133  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4134  *
4135  * @adev: amdgpu_device pointer
4136  *
4137  * The list of all the hardware IPs that make up the asic is walked and
4138  * the check_soft_reset callbacks are run.  check_soft_reset determines
4139  * if the asic is still hung or not.
4140  * Returns true if any of the IPs are still in a hung state, false if not.
4141  */
4142 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4143 {
4144 	int i;
4145 	bool asic_hang = false;
4146 
4147 	if (amdgpu_sriov_vf(adev))
4148 		return true;
4149 
4150 	if (amdgpu_asic_need_full_reset(adev))
4151 		return true;
4152 
4153 	for (i = 0; i < adev->num_ip_blocks; i++) {
4154 		if (!adev->ip_blocks[i].status.valid)
4155 			continue;
4156 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4157 			adev->ip_blocks[i].status.hang =
4158 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4159 		if (adev->ip_blocks[i].status.hang) {
4160 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4161 			asic_hang = true;
4162 		}
4163 	}
4164 	return asic_hang;
4165 }
4166 
4167 /**
4168  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4169  *
4170  * @adev: amdgpu_device pointer
4171  *
4172  * The list of all the hardware IPs that make up the asic is walked and the
4173  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4174  * handles any IP specific hardware or software state changes that are
4175  * necessary for a soft reset to succeed.
4176  * Returns 0 on success, negative error code on failure.
4177  */
4178 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4179 {
4180 	int i, r = 0;
4181 
4182 	for (i = 0; i < adev->num_ip_blocks; i++) {
4183 		if (!adev->ip_blocks[i].status.valid)
4184 			continue;
4185 		if (adev->ip_blocks[i].status.hang &&
4186 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4187 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4188 			if (r)
4189 				return r;
4190 		}
4191 	}
4192 
4193 	return 0;
4194 }
4195 
4196 /**
4197  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4198  *
4199  * @adev: amdgpu_device pointer
4200  *
4201  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4202  * reset is necessary to recover.
4203  * Returns true if a full asic reset is required, false if not.
4204  */
4205 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4206 {
4207 	int i;
4208 
4209 	if (amdgpu_asic_need_full_reset(adev))
4210 		return true;
4211 
4212 	for (i = 0; i < adev->num_ip_blocks; i++) {
4213 		if (!adev->ip_blocks[i].status.valid)
4214 			continue;
4215 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4216 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4217 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4218 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4219 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4220 			if (adev->ip_blocks[i].status.hang) {
4221 				dev_info(adev->dev, "Some block need full reset!\n");
4222 				return true;
4223 			}
4224 		}
4225 	}
4226 	return false;
4227 }
4228 
4229 /**
4230  * amdgpu_device_ip_soft_reset - do a soft reset
4231  *
4232  * @adev: amdgpu_device pointer
4233  *
4234  * The list of all the hardware IPs that make up the asic is walked and the
4235  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4236  * IP specific hardware or software state changes that are necessary to soft
4237  * reset the IP.
4238  * Returns 0 on success, negative error code on failure.
4239  */
4240 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4241 {
4242 	int i, r = 0;
4243 
4244 	for (i = 0; i < adev->num_ip_blocks; i++) {
4245 		if (!adev->ip_blocks[i].status.valid)
4246 			continue;
4247 		if (adev->ip_blocks[i].status.hang &&
4248 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4249 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4250 			if (r)
4251 				return r;
4252 		}
4253 	}
4254 
4255 	return 0;
4256 }
4257 
4258 /**
4259  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4260  *
4261  * @adev: amdgpu_device pointer
4262  *
4263  * The list of all the hardware IPs that make up the asic is walked and the
4264  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4265  * handles any IP specific hardware or software state changes that are
4266  * necessary after the IP has been soft reset.
4267  * Returns 0 on success, negative error code on failure.
4268  */
4269 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4270 {
4271 	int i, r = 0;
4272 
4273 	for (i = 0; i < adev->num_ip_blocks; i++) {
4274 		if (!adev->ip_blocks[i].status.valid)
4275 			continue;
4276 		if (adev->ip_blocks[i].status.hang &&
4277 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4278 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4279 		if (r)
4280 			return r;
4281 	}
4282 
4283 	return 0;
4284 }
4285 
4286 /**
4287  * amdgpu_device_recover_vram - Recover some VRAM contents
4288  *
4289  * @adev: amdgpu_device pointer
4290  *
4291  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4292  * restore things like GPUVM page tables after a GPU reset where
4293  * the contents of VRAM might be lost.
4294  *
4295  * Returns:
4296  * 0 on success, negative error code on failure.
4297  */
4298 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4299 {
4300 	struct dma_fence *fence = NULL, *next = NULL;
4301 	struct amdgpu_bo *shadow;
4302 	struct amdgpu_bo_vm *vmbo;
4303 	long r = 1, tmo;
4304 
4305 	if (amdgpu_sriov_runtime(adev))
4306 		tmo = msecs_to_jiffies(8000);
4307 	else
4308 		tmo = msecs_to_jiffies(100);
4309 
4310 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4311 	mutex_lock(&adev->shadow_list_lock);
4312 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4313 		shadow = &vmbo->bo;
4314 		/* No need to recover an evicted BO */
4315 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4316 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4317 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4318 			continue;
4319 
4320 		r = amdgpu_bo_restore_shadow(shadow, &next);
4321 		if (r)
4322 			break;
4323 
4324 		if (fence) {
4325 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4326 			dma_fence_put(fence);
4327 			fence = next;
4328 			if (tmo == 0) {
4329 				r = -ETIMEDOUT;
4330 				break;
4331 			} else if (tmo < 0) {
4332 				r = tmo;
4333 				break;
4334 			}
4335 		} else {
4336 			fence = next;
4337 		}
4338 	}
4339 	mutex_unlock(&adev->shadow_list_lock);
4340 
4341 	if (fence)
4342 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4343 	dma_fence_put(fence);
4344 
4345 	if (r < 0 || tmo <= 0) {
4346 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4347 		return -EIO;
4348 	}
4349 
4350 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4351 	return 0;
4352 }
4353 
4354 
4355 /**
4356  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4357  *
4358  * @adev: amdgpu_device pointer
4359  * @from_hypervisor: request from hypervisor
4360  *
4361  * do VF FLR and reinitialize Asic
4362  * return 0 means succeeded otherwise failed
4363  */
4364 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4365 				     bool from_hypervisor)
4366 {
4367 	int r;
4368 	struct amdgpu_hive_info *hive = NULL;
4369 
4370 	amdgpu_amdkfd_pre_reset(adev);
4371 
4372 	amdgpu_amdkfd_pre_reset(adev);
4373 
4374 	if (from_hypervisor)
4375 		r = amdgpu_virt_request_full_gpu(adev, true);
4376 	else
4377 		r = amdgpu_virt_reset_gpu(adev);
4378 	if (r)
4379 		return r;
4380 
4381 	/* Resume IP prior to SMC */
4382 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4383 	if (r)
4384 		goto error;
4385 
4386 	amdgpu_virt_init_data_exchange(adev);
4387 
4388 	r = amdgpu_device_fw_loading(adev);
4389 	if (r)
4390 		return r;
4391 
4392 	/* now we are okay to resume SMC/CP/SDMA */
4393 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4394 	if (r)
4395 		goto error;
4396 
4397 	hive = amdgpu_get_xgmi_hive(adev);
4398 	/* Update PSP FW topology after reset */
4399 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4400 		r = amdgpu_xgmi_update_topology(hive, adev);
4401 
4402 	if (hive)
4403 		amdgpu_put_xgmi_hive(hive);
4404 
4405 	if (!r) {
4406 		amdgpu_irq_gpu_reset_resume_helper(adev);
4407 		r = amdgpu_ib_ring_tests(adev);
4408 		amdgpu_amdkfd_post_reset(adev);
4409 	}
4410 
4411 error:
4412 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4413 		amdgpu_inc_vram_lost(adev);
4414 		r = amdgpu_device_recover_vram(adev);
4415 	}
4416 	amdgpu_virt_release_full_gpu(adev, true);
4417 
4418 	return r;
4419 }
4420 
4421 /**
4422  * amdgpu_device_has_job_running - check if there is any job in mirror list
4423  *
4424  * @adev: amdgpu_device pointer
4425  *
4426  * check if there is any job in mirror list
4427  */
4428 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4429 {
4430 	int i;
4431 	struct drm_sched_job *job;
4432 
4433 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4434 		struct amdgpu_ring *ring = adev->rings[i];
4435 
4436 		if (!ring || !ring->sched.thread)
4437 			continue;
4438 
4439 		spin_lock(&ring->sched.job_list_lock);
4440 		job = list_first_entry_or_null(&ring->sched.pending_list,
4441 					       struct drm_sched_job, list);
4442 		spin_unlock(&ring->sched.job_list_lock);
4443 		if (job)
4444 			return true;
4445 	}
4446 	return false;
4447 }
4448 
4449 /**
4450  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4451  *
4452  * @adev: amdgpu_device pointer
4453  *
4454  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4455  * a hung GPU.
4456  */
4457 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4458 {
4459 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4460 		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4461 		return false;
4462 	}
4463 
4464 	if (amdgpu_gpu_recovery == 0)
4465 		goto disabled;
4466 
4467 	if (amdgpu_sriov_vf(adev))
4468 		return true;
4469 
4470 	if (amdgpu_gpu_recovery == -1) {
4471 		switch (adev->asic_type) {
4472 #ifdef CONFIG_DRM_AMDGPU_SI
4473 		case CHIP_VERDE:
4474 		case CHIP_TAHITI:
4475 		case CHIP_PITCAIRN:
4476 		case CHIP_OLAND:
4477 		case CHIP_HAINAN:
4478 #endif
4479 #ifdef CONFIG_DRM_AMDGPU_CIK
4480 		case CHIP_KAVERI:
4481 		case CHIP_KABINI:
4482 		case CHIP_MULLINS:
4483 #endif
4484 		case CHIP_CARRIZO:
4485 		case CHIP_STONEY:
4486 		case CHIP_CYAN_SKILLFISH:
4487 			goto disabled;
4488 		default:
4489 			break;
4490 		}
4491 	}
4492 
4493 	return true;
4494 
4495 disabled:
4496 		dev_info(adev->dev, "GPU recovery disabled.\n");
4497 		return false;
4498 }
4499 
4500 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4501 {
4502         u32 i;
4503         int ret = 0;
4504 
4505         amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4506 
4507         dev_info(adev->dev, "GPU mode1 reset\n");
4508 
4509         /* disable BM */
4510         pci_clear_master(adev->pdev);
4511 
4512         amdgpu_device_cache_pci_state(adev->pdev);
4513 
4514         if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4515                 dev_info(adev->dev, "GPU smu mode1 reset\n");
4516                 ret = amdgpu_dpm_mode1_reset(adev);
4517         } else {
4518                 dev_info(adev->dev, "GPU psp mode1 reset\n");
4519                 ret = psp_gpu_reset(adev);
4520         }
4521 
4522         if (ret)
4523                 dev_err(adev->dev, "GPU mode1 reset failed\n");
4524 
4525         amdgpu_device_load_pci_state(adev->pdev);
4526 
4527         /* wait for asic to come out of reset */
4528         for (i = 0; i < adev->usec_timeout; i++) {
4529                 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4530 
4531                 if (memsize != 0xffffffff)
4532                         break;
4533                 udelay(1);
4534         }
4535 
4536         amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4537         return ret;
4538 }
4539 
4540 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4541 				 struct amdgpu_reset_context *reset_context)
4542 {
4543 	int i, r = 0;
4544 	struct amdgpu_job *job = NULL;
4545 	bool need_full_reset =
4546 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4547 
4548 	if (reset_context->reset_req_dev == adev)
4549 		job = reset_context->job;
4550 
4551 	if (amdgpu_sriov_vf(adev)) {
4552 		/* stop the data exchange thread */
4553 		amdgpu_virt_fini_data_exchange(adev);
4554 	}
4555 
4556 	/* block all schedulers and reset given job's ring */
4557 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4558 		struct amdgpu_ring *ring = adev->rings[i];
4559 
4560 		if (!ring || !ring->sched.thread)
4561 			continue;
4562 
4563 		/*clear job fence from fence drv to avoid force_completion
4564 		 *leave NULL and vm flush fence in fence drv */
4565 		amdgpu_fence_driver_clear_job_fences(ring);
4566 
4567 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4568 		amdgpu_fence_driver_force_completion(ring);
4569 	}
4570 
4571 	if (job && job->vm)
4572 		drm_sched_increase_karma(&job->base);
4573 
4574 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4575 	/* If reset handler not implemented, continue; otherwise return */
4576 	if (r == -ENOSYS)
4577 		r = 0;
4578 	else
4579 		return r;
4580 
4581 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4582 	if (!amdgpu_sriov_vf(adev)) {
4583 
4584 		if (!need_full_reset)
4585 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4586 
4587 		if (!need_full_reset) {
4588 			amdgpu_device_ip_pre_soft_reset(adev);
4589 			r = amdgpu_device_ip_soft_reset(adev);
4590 			amdgpu_device_ip_post_soft_reset(adev);
4591 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4592 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4593 				need_full_reset = true;
4594 			}
4595 		}
4596 
4597 		if (need_full_reset)
4598 			r = amdgpu_device_ip_suspend(adev);
4599 		if (need_full_reset)
4600 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4601 		else
4602 			clear_bit(AMDGPU_NEED_FULL_RESET,
4603 				  &reset_context->flags);
4604 	}
4605 
4606 	return r;
4607 }
4608 
4609 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4610 			 struct amdgpu_reset_context *reset_context)
4611 {
4612 	struct amdgpu_device *tmp_adev = NULL;
4613 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4614 	int r = 0;
4615 
4616 	/* Try reset handler method first */
4617 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4618 				    reset_list);
4619 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4620 	/* If reset handler not implemented, continue; otherwise return */
4621 	if (r == -ENOSYS)
4622 		r = 0;
4623 	else
4624 		return r;
4625 
4626 	/* Reset handler not implemented, use the default method */
4627 	need_full_reset =
4628 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4629 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4630 
4631 	/*
4632 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
4633 	 * to allow proper links negotiation in FW (within 1 sec)
4634 	 */
4635 	if (!skip_hw_reset && need_full_reset) {
4636 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4637 			/* For XGMI run all resets in parallel to speed up the process */
4638 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4639 				tmp_adev->gmc.xgmi.pending_reset = false;
4640 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4641 					r = -EALREADY;
4642 			} else
4643 				r = amdgpu_asic_reset(tmp_adev);
4644 
4645 			if (r) {
4646 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4647 					 r, adev_to_drm(tmp_adev)->unique);
4648 				break;
4649 			}
4650 		}
4651 
4652 		/* For XGMI wait for all resets to complete before proceed */
4653 		if (!r) {
4654 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4655 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4656 					flush_work(&tmp_adev->xgmi_reset_work);
4657 					r = tmp_adev->asic_reset_res;
4658 					if (r)
4659 						break;
4660 				}
4661 			}
4662 		}
4663 	}
4664 
4665 	if (!r && amdgpu_ras_intr_triggered()) {
4666 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4667 			if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4668 			    tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4669 				tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4670 		}
4671 
4672 		amdgpu_ras_intr_cleared();
4673 	}
4674 
4675 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4676 		if (need_full_reset) {
4677 			/* post card */
4678 			r = amdgpu_device_asic_init(tmp_adev);
4679 			if (r) {
4680 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4681 			} else {
4682 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4683 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4684 				if (r)
4685 					goto out;
4686 
4687 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4688 				if (r)
4689 					goto out;
4690 
4691 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4692 				if (vram_lost) {
4693 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4694 					amdgpu_inc_vram_lost(tmp_adev);
4695 				}
4696 
4697 				r = amdgpu_device_fw_loading(tmp_adev);
4698 				if (r)
4699 					return r;
4700 
4701 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4702 				if (r)
4703 					goto out;
4704 
4705 				if (vram_lost)
4706 					amdgpu_device_fill_reset_magic(tmp_adev);
4707 
4708 				/*
4709 				 * Add this ASIC as tracked as reset was already
4710 				 * complete successfully.
4711 				 */
4712 				amdgpu_register_gpu_instance(tmp_adev);
4713 
4714 				if (!reset_context->hive &&
4715 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4716 					amdgpu_xgmi_add_device(tmp_adev);
4717 
4718 				r = amdgpu_device_ip_late_init(tmp_adev);
4719 				if (r)
4720 					goto out;
4721 
4722 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4723 
4724 				/*
4725 				 * The GPU enters bad state once faulty pages
4726 				 * by ECC has reached the threshold, and ras
4727 				 * recovery is scheduled next. So add one check
4728 				 * here to break recovery if it indeed exceeds
4729 				 * bad page threshold, and remind user to
4730 				 * retire this GPU or setting one bigger
4731 				 * bad_page_threshold value to fix this once
4732 				 * probing driver again.
4733 				 */
4734 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4735 					/* must succeed. */
4736 					amdgpu_ras_resume(tmp_adev);
4737 				} else {
4738 					r = -EINVAL;
4739 					goto out;
4740 				}
4741 
4742 				/* Update PSP FW topology after reset */
4743 				if (reset_context->hive &&
4744 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4745 					r = amdgpu_xgmi_update_topology(
4746 						reset_context->hive, tmp_adev);
4747 			}
4748 		}
4749 
4750 out:
4751 		if (!r) {
4752 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4753 			r = amdgpu_ib_ring_tests(tmp_adev);
4754 			if (r) {
4755 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4756 				need_full_reset = true;
4757 				r = -EAGAIN;
4758 				goto end;
4759 			}
4760 		}
4761 
4762 		if (!r)
4763 			r = amdgpu_device_recover_vram(tmp_adev);
4764 		else
4765 			tmp_adev->asic_reset_res = r;
4766 	}
4767 
4768 end:
4769 	if (need_full_reset)
4770 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4771 	else
4772 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4773 	return r;
4774 }
4775 
4776 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4777 				struct amdgpu_hive_info *hive)
4778 {
4779 	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4780 		return false;
4781 
4782 	if (hive) {
4783 		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4784 	} else {
4785 		down_write(&adev->reset_sem);
4786 	}
4787 
4788 	switch (amdgpu_asic_reset_method(adev)) {
4789 	case AMD_RESET_METHOD_MODE1:
4790 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4791 		break;
4792 	case AMD_RESET_METHOD_MODE2:
4793 		adev->mp1_state = PP_MP1_STATE_RESET;
4794 		break;
4795 	default:
4796 		adev->mp1_state = PP_MP1_STATE_NONE;
4797 		break;
4798 	}
4799 
4800 	return true;
4801 }
4802 
4803 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4804 {
4805 	amdgpu_vf_error_trans_all(adev);
4806 	adev->mp1_state = PP_MP1_STATE_NONE;
4807 	atomic_set(&adev->in_gpu_reset, 0);
4808 	up_write(&adev->reset_sem);
4809 }
4810 
4811 /*
4812  * to lockup a list of amdgpu devices in a hive safely, if not a hive
4813  * with multiple nodes, it will be similar as amdgpu_device_lock_adev.
4814  *
4815  * unlock won't require roll back.
4816  */
4817 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4818 {
4819 	struct amdgpu_device *tmp_adev = NULL;
4820 
4821 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
4822 		if (!hive) {
4823 			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4824 			return -ENODEV;
4825 		}
4826 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4827 			if (!amdgpu_device_lock_adev(tmp_adev, hive))
4828 				goto roll_back;
4829 		}
4830 	} else if (!amdgpu_device_lock_adev(adev, hive))
4831 		return -EAGAIN;
4832 
4833 	return 0;
4834 roll_back:
4835 	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4836 		/*
4837 		 * if the lockup iteration break in the middle of a hive,
4838 		 * it may means there may has a race issue,
4839 		 * or a hive device locked up independently.
4840 		 * we may be in trouble and may not, so will try to roll back
4841 		 * the lock and give out a warnning.
4842 		 */
4843 		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4844 		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4845 			amdgpu_device_unlock_adev(tmp_adev);
4846 		}
4847 	}
4848 	return -EAGAIN;
4849 }
4850 
4851 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4852 {
4853 	struct pci_dev *p = NULL;
4854 
4855 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4856 			adev->pdev->bus->number, 1);
4857 	if (p) {
4858 		pm_runtime_enable(&(p->dev));
4859 		pm_runtime_resume(&(p->dev));
4860 	}
4861 }
4862 
4863 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4864 {
4865 	enum amd_reset_method reset_method;
4866 	struct pci_dev *p = NULL;
4867 	u64 expires;
4868 
4869 	/*
4870 	 * For now, only BACO and mode1 reset are confirmed
4871 	 * to suffer the audio issue without proper suspended.
4872 	 */
4873 	reset_method = amdgpu_asic_reset_method(adev);
4874 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4875 	     (reset_method != AMD_RESET_METHOD_MODE1))
4876 		return -EINVAL;
4877 
4878 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4879 			adev->pdev->bus->number, 1);
4880 	if (!p)
4881 		return -ENODEV;
4882 
4883 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4884 	if (!expires)
4885 		/*
4886 		 * If we cannot get the audio device autosuspend delay,
4887 		 * a fixed 4S interval will be used. Considering 3S is
4888 		 * the audio controller default autosuspend delay setting.
4889 		 * 4S used here is guaranteed to cover that.
4890 		 */
4891 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4892 
4893 	while (!pm_runtime_status_suspended(&(p->dev))) {
4894 		if (!pm_runtime_suspend(&(p->dev)))
4895 			break;
4896 
4897 		if (expires < ktime_get_mono_fast_ns()) {
4898 			dev_warn(adev->dev, "failed to suspend display audio\n");
4899 			/* TODO: abort the succeeding gpu reset? */
4900 			return -ETIMEDOUT;
4901 		}
4902 	}
4903 
4904 	pm_runtime_disable(&(p->dev));
4905 
4906 	return 0;
4907 }
4908 
4909 static void amdgpu_device_recheck_guilty_jobs(
4910 	struct amdgpu_device *adev, struct list_head *device_list_handle,
4911 	struct amdgpu_reset_context *reset_context)
4912 {
4913 	int i, r = 0;
4914 
4915 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4916 		struct amdgpu_ring *ring = adev->rings[i];
4917 		int ret = 0;
4918 		struct drm_sched_job *s_job;
4919 
4920 		if (!ring || !ring->sched.thread)
4921 			continue;
4922 
4923 		s_job = list_first_entry_or_null(&ring->sched.pending_list,
4924 				struct drm_sched_job, list);
4925 		if (s_job == NULL)
4926 			continue;
4927 
4928 		/* clear job's guilty and depend the folowing step to decide the real one */
4929 		drm_sched_reset_karma(s_job);
4930 		/* for the real bad job, it will be resubmitted twice, adding a dma_fence_get
4931 		 * to make sure fence is balanced */
4932 		dma_fence_get(s_job->s_fence->parent);
4933 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4934 
4935 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4936 		if (ret == 0) { /* timeout */
4937 			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4938 						ring->sched.name, s_job->id);
4939 
4940 			/* set guilty */
4941 			drm_sched_increase_karma(s_job);
4942 retry:
4943 			/* do hw reset */
4944 			if (amdgpu_sriov_vf(adev)) {
4945 				amdgpu_virt_fini_data_exchange(adev);
4946 				r = amdgpu_device_reset_sriov(adev, false);
4947 				if (r)
4948 					adev->asic_reset_res = r;
4949 			} else {
4950 				clear_bit(AMDGPU_SKIP_HW_RESET,
4951 					  &reset_context->flags);
4952 				r = amdgpu_do_asic_reset(device_list_handle,
4953 							 reset_context);
4954 				if (r && r == -EAGAIN)
4955 					goto retry;
4956 			}
4957 
4958 			/*
4959 			 * add reset counter so that the following
4960 			 * resubmitted job could flush vmid
4961 			 */
4962 			atomic_inc(&adev->gpu_reset_counter);
4963 			continue;
4964 		}
4965 
4966 		/* got the hw fence, signal finished fence */
4967 		atomic_dec(ring->sched.score);
4968 		dma_fence_put(s_job->s_fence->parent);
4969 		dma_fence_get(&s_job->s_fence->finished);
4970 		dma_fence_signal(&s_job->s_fence->finished);
4971 		dma_fence_put(&s_job->s_fence->finished);
4972 
4973 		/* remove node from list and free the job */
4974 		spin_lock(&ring->sched.job_list_lock);
4975 		list_del_init(&s_job->list);
4976 		spin_unlock(&ring->sched.job_list_lock);
4977 		ring->sched.ops->free_job(s_job);
4978 	}
4979 }
4980 
4981 /**
4982  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4983  *
4984  * @adev: amdgpu_device pointer
4985  * @job: which job trigger hang
4986  *
4987  * Attempt to reset the GPU if it has hung (all asics).
4988  * Attempt to do soft-reset or full-reset and reinitialize Asic
4989  * Returns 0 for success or an error on failure.
4990  */
4991 
4992 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4993 			      struct amdgpu_job *job)
4994 {
4995 	struct list_head device_list, *device_list_handle =  NULL;
4996 	bool job_signaled = false;
4997 	struct amdgpu_hive_info *hive = NULL;
4998 	struct amdgpu_device *tmp_adev = NULL;
4999 	int i, r = 0;
5000 	bool need_emergency_restart = false;
5001 	bool audio_suspended = false;
5002 	int tmp_vram_lost_counter;
5003 	struct amdgpu_reset_context reset_context;
5004 
5005 	memset(&reset_context, 0, sizeof(reset_context));
5006 
5007 	/*
5008 	 * Special case: RAS triggered and full reset isn't supported
5009 	 */
5010 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5011 
5012 	/*
5013 	 * Flush RAM to disk so that after reboot
5014 	 * the user can read log and see why the system rebooted.
5015 	 */
5016 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
5017 		DRM_WARN("Emergency reboot.");
5018 
5019 		ksys_sync_helper();
5020 		emergency_restart();
5021 	}
5022 
5023 	dev_info(adev->dev, "GPU %s begin!\n",
5024 		need_emergency_restart ? "jobs stop":"reset");
5025 
5026 	/*
5027 	 * Here we trylock to avoid chain of resets executing from
5028 	 * either trigger by jobs on different adevs in XGMI hive or jobs on
5029 	 * different schedulers for same device while this TO handler is running.
5030 	 * We always reset all schedulers for device and all devices for XGMI
5031 	 * hive so that should take care of them too.
5032 	 */
5033 	if (!amdgpu_sriov_vf(adev))
5034 		hive = amdgpu_get_xgmi_hive(adev);
5035 	if (hive) {
5036 		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
5037 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
5038 				job ? job->base.id : -1, hive->hive_id);
5039 			amdgpu_put_xgmi_hive(hive);
5040 			if (job && job->vm)
5041 				drm_sched_increase_karma(&job->base);
5042 			return 0;
5043 		}
5044 		mutex_lock(&hive->hive_lock);
5045 	}
5046 
5047 	reset_context.method = AMD_RESET_METHOD_NONE;
5048 	reset_context.reset_req_dev = adev;
5049 	reset_context.job = job;
5050 	reset_context.hive = hive;
5051 	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5052 
5053 	/*
5054 	 * lock the device before we try to operate the linked list
5055 	 * if didn't get the device lock, don't touch the linked list since
5056 	 * others may iterating it.
5057 	 */
5058 	r = amdgpu_device_lock_hive_adev(adev, hive);
5059 	if (r) {
5060 		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
5061 					job ? job->base.id : -1);
5062 
5063 		/* even we skipped this reset, still need to set the job to guilty */
5064 		if (job && job->vm)
5065 			drm_sched_increase_karma(&job->base);
5066 		goto skip_recovery;
5067 	}
5068 
5069 	/*
5070 	 * Build list of devices to reset.
5071 	 * In case we are in XGMI hive mode, resort the device list
5072 	 * to put adev in the 1st position.
5073 	 */
5074 	INIT_LIST_HEAD(&device_list);
5075 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5076 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5077 			list_add_tail(&tmp_adev->reset_list, &device_list);
5078 		if (!list_is_first(&adev->reset_list, &device_list))
5079 			list_rotate_to_front(&adev->reset_list, &device_list);
5080 		device_list_handle = &device_list;
5081 	} else {
5082 		list_add_tail(&adev->reset_list, &device_list);
5083 		device_list_handle = &device_list;
5084 	}
5085 
5086 	/* block all schedulers and reset given job's ring */
5087 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5088 		/*
5089 		 * Try to put the audio codec into suspend state
5090 		 * before gpu reset started.
5091 		 *
5092 		 * Due to the power domain of the graphics device
5093 		 * is shared with AZ power domain. Without this,
5094 		 * we may change the audio hardware from behind
5095 		 * the audio driver's back. That will trigger
5096 		 * some audio codec errors.
5097 		 */
5098 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5099 			audio_suspended = true;
5100 
5101 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5102 
5103 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5104 
5105 		if (!amdgpu_sriov_vf(tmp_adev))
5106 			amdgpu_amdkfd_pre_reset(tmp_adev);
5107 
5108 		/*
5109 		 * Mark these ASICs to be reseted as untracked first
5110 		 * And add them back after reset completed
5111 		 */
5112 		amdgpu_unregister_gpu_instance(tmp_adev);
5113 
5114 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
5115 
5116 		/* disable ras on ALL IPs */
5117 		if (!need_emergency_restart &&
5118 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5119 			amdgpu_ras_suspend(tmp_adev);
5120 
5121 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5122 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5123 
5124 			if (!ring || !ring->sched.thread)
5125 				continue;
5126 
5127 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5128 
5129 			if (need_emergency_restart)
5130 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5131 		}
5132 		atomic_inc(&tmp_adev->gpu_reset_counter);
5133 	}
5134 
5135 	if (need_emergency_restart)
5136 		goto skip_sched_resume;
5137 
5138 	/*
5139 	 * Must check guilty signal here since after this point all old
5140 	 * HW fences are force signaled.
5141 	 *
5142 	 * job->base holds a reference to parent fence
5143 	 */
5144 	if (job && job->base.s_fence->parent &&
5145 	    dma_fence_is_signaled(job->base.s_fence->parent)) {
5146 		job_signaled = true;
5147 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5148 		goto skip_hw_reset;
5149 	}
5150 
5151 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5152 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5153 		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
5154 		/*TODO Should we stop ?*/
5155 		if (r) {
5156 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5157 				  r, adev_to_drm(tmp_adev)->unique);
5158 			tmp_adev->asic_reset_res = r;
5159 		}
5160 	}
5161 
5162 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5163 	/* Actual ASIC resets if needed.*/
5164 	/* Host driver will handle XGMI hive reset for SRIOV */
5165 	if (amdgpu_sriov_vf(adev)) {
5166 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5167 		if (r)
5168 			adev->asic_reset_res = r;
5169 	} else {
5170 		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5171 		if (r && r == -EAGAIN)
5172 			goto retry;
5173 	}
5174 
5175 skip_hw_reset:
5176 
5177 	/* Post ASIC reset for all devs .*/
5178 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5179 
5180 		/*
5181 		 * Sometimes a later bad compute job can block a good gfx job as gfx
5182 		 * and compute ring share internal GC HW mutually. We add an additional
5183 		 * guilty jobs recheck step to find the real guilty job, it synchronously
5184 		 * submits and pends for the first job being signaled. If it gets timeout,
5185 		 * we identify it as a real guilty job.
5186 		 */
5187 		if (amdgpu_gpu_recovery == 2 &&
5188 			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5189 			amdgpu_device_recheck_guilty_jobs(
5190 				tmp_adev, device_list_handle, &reset_context);
5191 
5192 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5193 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5194 
5195 			if (!ring || !ring->sched.thread)
5196 				continue;
5197 
5198 			/* No point to resubmit jobs if we didn't HW reset*/
5199 			if (!tmp_adev->asic_reset_res && !job_signaled)
5200 				drm_sched_resubmit_jobs(&ring->sched);
5201 
5202 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5203 		}
5204 
5205 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
5206 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5207 		}
5208 
5209 		tmp_adev->asic_reset_res = 0;
5210 
5211 		if (r) {
5212 			/* bad news, how to tell it to userspace ? */
5213 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5214 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5215 		} else {
5216 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5217 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5218 				DRM_WARN("smart shift update failed\n");
5219 		}
5220 	}
5221 
5222 skip_sched_resume:
5223 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5224 		/* unlock kfd: SRIOV would do it separately */
5225 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5226 			amdgpu_amdkfd_post_reset(tmp_adev);
5227 
5228 		/* kfd_post_reset will do nothing if kfd device is not initialized,
5229 		 * need to bring up kfd here if it's not be initialized before
5230 		 */
5231 		if (!adev->kfd.init_complete)
5232 			amdgpu_amdkfd_device_init(adev);
5233 
5234 		if (audio_suspended)
5235 			amdgpu_device_resume_display_audio(tmp_adev);
5236 		amdgpu_device_unlock_adev(tmp_adev);
5237 	}
5238 
5239 skip_recovery:
5240 	if (hive) {
5241 		atomic_set(&hive->in_reset, 0);
5242 		mutex_unlock(&hive->hive_lock);
5243 		amdgpu_put_xgmi_hive(hive);
5244 	}
5245 
5246 	if (r && r != -EAGAIN)
5247 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5248 	return r;
5249 }
5250 
5251 /**
5252  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
5253  *
5254  * @adev: amdgpu_device pointer
5255  *
5256  * Fetchs and stores in the driver the PCIE capabilities (gen speed
5257  * and lanes) of the slot the device is in. Handles APUs and
5258  * virtualized environments where PCIE config space may not be available.
5259  */
5260 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5261 {
5262 	struct pci_dev *pdev;
5263 	enum pci_bus_speed speed_cap, platform_speed_cap;
5264 	enum pcie_link_width platform_link_width;
5265 
5266 	if (amdgpu_pcie_gen_cap)
5267 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5268 
5269 	if (amdgpu_pcie_lane_cap)
5270 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5271 
5272 	/* covers APUs as well */
5273 	if (pci_is_root_bus(adev->pdev->bus)) {
5274 		if (adev->pm.pcie_gen_mask == 0)
5275 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5276 		if (adev->pm.pcie_mlw_mask == 0)
5277 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5278 		return;
5279 	}
5280 
5281 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5282 		return;
5283 
5284 	pcie_bandwidth_available(adev->pdev, NULL,
5285 				 &platform_speed_cap, &platform_link_width);
5286 
5287 	if (adev->pm.pcie_gen_mask == 0) {
5288 		/* asic caps */
5289 		pdev = adev->pdev;
5290 		speed_cap = pcie_get_speed_cap(pdev);
5291 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5292 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5293 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5294 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5295 		} else {
5296 			if (speed_cap == PCIE_SPEED_32_0GT)
5297 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5298 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5299 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5300 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5301 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5302 			else if (speed_cap == PCIE_SPEED_16_0GT)
5303 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5304 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5305 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5306 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5307 			else if (speed_cap == PCIE_SPEED_8_0GT)
5308 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5309 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5310 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5311 			else if (speed_cap == PCIE_SPEED_5_0GT)
5312 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5313 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5314 			else
5315 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5316 		}
5317 		/* platform caps */
5318 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5319 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5320 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5321 		} else {
5322 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5323 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5324 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5325 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5326 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5327 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5328 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5329 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5330 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5331 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5332 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5333 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5334 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5335 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5336 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5337 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5338 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5339 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5340 			else
5341 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5342 
5343 		}
5344 	}
5345 	if (adev->pm.pcie_mlw_mask == 0) {
5346 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5347 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5348 		} else {
5349 			switch (platform_link_width) {
5350 			case PCIE_LNK_X32:
5351 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5352 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5353 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5354 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5355 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5356 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5357 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5358 				break;
5359 			case PCIE_LNK_X16:
5360 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5361 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5362 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5363 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5364 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5365 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5366 				break;
5367 			case PCIE_LNK_X12:
5368 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5369 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5370 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5371 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5372 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5373 				break;
5374 			case PCIE_LNK_X8:
5375 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5376 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5377 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5378 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5379 				break;
5380 			case PCIE_LNK_X4:
5381 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5382 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5383 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5384 				break;
5385 			case PCIE_LNK_X2:
5386 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5387 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5388 				break;
5389 			case PCIE_LNK_X1:
5390 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5391 				break;
5392 			default:
5393 				break;
5394 			}
5395 		}
5396 	}
5397 }
5398 
5399 int amdgpu_device_baco_enter(struct drm_device *dev)
5400 {
5401 	struct amdgpu_device *adev = drm_to_adev(dev);
5402 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5403 
5404 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5405 		return -ENOTSUPP;
5406 
5407 	if (ras && adev->ras_enabled &&
5408 	    adev->nbio.funcs->enable_doorbell_interrupt)
5409 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5410 
5411 	return amdgpu_dpm_baco_enter(adev);
5412 }
5413 
5414 int amdgpu_device_baco_exit(struct drm_device *dev)
5415 {
5416 	struct amdgpu_device *adev = drm_to_adev(dev);
5417 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5418 	int ret = 0;
5419 
5420 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5421 		return -ENOTSUPP;
5422 
5423 	ret = amdgpu_dpm_baco_exit(adev);
5424 	if (ret)
5425 		return ret;
5426 
5427 	if (ras && adev->ras_enabled &&
5428 	    adev->nbio.funcs->enable_doorbell_interrupt)
5429 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5430 
5431 	if (amdgpu_passthrough(adev) &&
5432 	    adev->nbio.funcs->clear_doorbell_interrupt)
5433 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5434 
5435 	return 0;
5436 }
5437 
5438 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5439 {
5440 	int i;
5441 
5442 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5443 		struct amdgpu_ring *ring = adev->rings[i];
5444 
5445 		if (!ring || !ring->sched.thread)
5446 			continue;
5447 
5448 		cancel_delayed_work_sync(&ring->sched.work_tdr);
5449 	}
5450 }
5451 
5452 /**
5453  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5454  * @pdev: PCI device struct
5455  * @state: PCI channel state
5456  *
5457  * Description: Called when a PCI error is detected.
5458  *
5459  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5460  */
5461 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5462 {
5463 	struct drm_device *dev = pci_get_drvdata(pdev);
5464 	struct amdgpu_device *adev = drm_to_adev(dev);
5465 	int i;
5466 
5467 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5468 
5469 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5470 		DRM_WARN("No support for XGMI hive yet...");
5471 		return PCI_ERS_RESULT_DISCONNECT;
5472 	}
5473 
5474 	adev->pci_channel_state = state;
5475 
5476 	switch (state) {
5477 	case pci_channel_io_normal:
5478 		return PCI_ERS_RESULT_CAN_RECOVER;
5479 	/* Fatal error, prepare for slot reset */
5480 	case pci_channel_io_frozen:
5481 		/*
5482 		 * Cancel and wait for all TDRs in progress if failing to
5483 		 * set  adev->in_gpu_reset in amdgpu_device_lock_adev
5484 		 *
5485 		 * Locking adev->reset_sem will prevent any external access
5486 		 * to GPU during PCI error recovery
5487 		 */
5488 		while (!amdgpu_device_lock_adev(adev, NULL))
5489 			amdgpu_cancel_all_tdr(adev);
5490 
5491 		/*
5492 		 * Block any work scheduling as we do for regular GPU reset
5493 		 * for the duration of the recovery
5494 		 */
5495 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5496 			struct amdgpu_ring *ring = adev->rings[i];
5497 
5498 			if (!ring || !ring->sched.thread)
5499 				continue;
5500 
5501 			drm_sched_stop(&ring->sched, NULL);
5502 		}
5503 		atomic_inc(&adev->gpu_reset_counter);
5504 		return PCI_ERS_RESULT_NEED_RESET;
5505 	case pci_channel_io_perm_failure:
5506 		/* Permanent error, prepare for device removal */
5507 		return PCI_ERS_RESULT_DISCONNECT;
5508 	}
5509 
5510 	return PCI_ERS_RESULT_NEED_RESET;
5511 }
5512 
5513 /**
5514  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5515  * @pdev: pointer to PCI device
5516  */
5517 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5518 {
5519 
5520 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5521 
5522 	/* TODO - dump whatever for debugging purposes */
5523 
5524 	/* This called only if amdgpu_pci_error_detected returns
5525 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
5526 	 * works, no need to reset slot.
5527 	 */
5528 
5529 	return PCI_ERS_RESULT_RECOVERED;
5530 }
5531 
5532 /**
5533  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5534  * @pdev: PCI device struct
5535  *
5536  * Description: This routine is called by the pci error recovery
5537  * code after the PCI slot has been reset, just before we
5538  * should resume normal operations.
5539  */
5540 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5541 {
5542 	struct drm_device *dev = pci_get_drvdata(pdev);
5543 	struct amdgpu_device *adev = drm_to_adev(dev);
5544 	int r, i;
5545 	struct amdgpu_reset_context reset_context;
5546 	u32 memsize;
5547 	struct list_head device_list;
5548 
5549 	DRM_INFO("PCI error: slot reset callback!!\n");
5550 
5551 	memset(&reset_context, 0, sizeof(reset_context));
5552 
5553 	INIT_LIST_HEAD(&device_list);
5554 	list_add_tail(&adev->reset_list, &device_list);
5555 
5556 	/* wait for asic to come out of reset */
5557 	msleep(500);
5558 
5559 	/* Restore PCI confspace */
5560 	amdgpu_device_load_pci_state(pdev);
5561 
5562 	/* confirm  ASIC came out of reset */
5563 	for (i = 0; i < adev->usec_timeout; i++) {
5564 		memsize = amdgpu_asic_get_config_memsize(adev);
5565 
5566 		if (memsize != 0xffffffff)
5567 			break;
5568 		udelay(1);
5569 	}
5570 	if (memsize == 0xffffffff) {
5571 		r = -ETIME;
5572 		goto out;
5573 	}
5574 
5575 	reset_context.method = AMD_RESET_METHOD_NONE;
5576 	reset_context.reset_req_dev = adev;
5577 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5578 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5579 
5580 	adev->no_hw_access = true;
5581 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5582 	adev->no_hw_access = false;
5583 	if (r)
5584 		goto out;
5585 
5586 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5587 
5588 out:
5589 	if (!r) {
5590 		if (amdgpu_device_cache_pci_state(adev->pdev))
5591 			pci_restore_state(adev->pdev);
5592 
5593 		DRM_INFO("PCIe error recovery succeeded\n");
5594 	} else {
5595 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
5596 		amdgpu_device_unlock_adev(adev);
5597 	}
5598 
5599 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5600 }
5601 
5602 /**
5603  * amdgpu_pci_resume() - resume normal ops after PCI reset
5604  * @pdev: pointer to PCI device
5605  *
5606  * Called when the error recovery driver tells us that its
5607  * OK to resume normal operation.
5608  */
5609 void amdgpu_pci_resume(struct pci_dev *pdev)
5610 {
5611 	struct drm_device *dev = pci_get_drvdata(pdev);
5612 	struct amdgpu_device *adev = drm_to_adev(dev);
5613 	int i;
5614 
5615 
5616 	DRM_INFO("PCI error: resume callback!!\n");
5617 
5618 	/* Only continue execution for the case of pci_channel_io_frozen */
5619 	if (adev->pci_channel_state != pci_channel_io_frozen)
5620 		return;
5621 
5622 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5623 		struct amdgpu_ring *ring = adev->rings[i];
5624 
5625 		if (!ring || !ring->sched.thread)
5626 			continue;
5627 
5628 
5629 		drm_sched_resubmit_jobs(&ring->sched);
5630 		drm_sched_start(&ring->sched, true);
5631 	}
5632 
5633 	amdgpu_device_unlock_adev(adev);
5634 }
5635 
5636 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5637 {
5638 	struct drm_device *dev = pci_get_drvdata(pdev);
5639 	struct amdgpu_device *adev = drm_to_adev(dev);
5640 	int r;
5641 
5642 	r = pci_save_state(pdev);
5643 	if (!r) {
5644 		kfree(adev->pci_state);
5645 
5646 		adev->pci_state = pci_store_saved_state(pdev);
5647 
5648 		if (!adev->pci_state) {
5649 			DRM_ERROR("Failed to store PCI saved state");
5650 			return false;
5651 		}
5652 	} else {
5653 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5654 		return false;
5655 	}
5656 
5657 	return true;
5658 }
5659 
5660 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5661 {
5662 	struct drm_device *dev = pci_get_drvdata(pdev);
5663 	struct amdgpu_device *adev = drm_to_adev(dev);
5664 	int r;
5665 
5666 	if (!adev->pci_state)
5667 		return false;
5668 
5669 	r = pci_load_saved_state(pdev, adev->pci_state);
5670 
5671 	if (!r) {
5672 		pci_restore_state(pdev);
5673 	} else {
5674 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5675 		return false;
5676 	}
5677 
5678 	return true;
5679 }
5680 
5681 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5682 		struct amdgpu_ring *ring)
5683 {
5684 #ifdef CONFIG_X86_64
5685 	if (adev->flags & AMD_IS_APU)
5686 		return;
5687 #endif
5688 	if (adev->gmc.xgmi.connected_to_cpu)
5689 		return;
5690 
5691 	if (ring && ring->funcs->emit_hdp_flush)
5692 		amdgpu_ring_emit_hdp_flush(ring);
5693 	else
5694 		amdgpu_asic_flush_hdp(adev, ring);
5695 }
5696 
5697 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5698 		struct amdgpu_ring *ring)
5699 {
5700 #ifdef CONFIG_X86_64
5701 	if (adev->flags & AMD_IS_APU)
5702 		return;
5703 #endif
5704 	if (adev->gmc.xgmi.connected_to_cpu)
5705 		return;
5706 
5707 	amdgpu_asic_invalidate_hdp(adev, ring);
5708 }
5709 
5710 /**
5711  * amdgpu_device_halt() - bring hardware to some kind of halt state
5712  *
5713  * @adev: amdgpu_device pointer
5714  *
5715  * Bring hardware to some kind of halt state so that no one can touch it
5716  * any more. It will help to maintain error context when error occurred.
5717  * Compare to a simple hang, the system will keep stable at least for SSH
5718  * access. Then it should be trivial to inspect the hardware state and
5719  * see what's going on. Implemented as following:
5720  *
5721  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
5722  *    clears all CPU mappings to device, disallows remappings through page faults
5723  * 2. amdgpu_irq_disable_all() disables all interrupts
5724  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5725  * 4. set adev->no_hw_access to avoid potential crashes after setp 5
5726  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5727  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5728  *    flush any in flight DMA operations
5729  */
5730 void amdgpu_device_halt(struct amdgpu_device *adev)
5731 {
5732 	struct pci_dev *pdev = adev->pdev;
5733 	struct drm_device *ddev = adev_to_drm(adev);
5734 
5735 	drm_dev_unplug(ddev);
5736 
5737 	amdgpu_irq_disable_all(adev);
5738 
5739 	amdgpu_fence_driver_hw_fini(adev);
5740 
5741 	adev->no_hw_access = true;
5742 
5743 	amdgpu_device_unmap_mmio(adev);
5744 
5745 	pci_disable_device(pdev);
5746 	pci_wait_for_pending_transaction(pdev);
5747 }
5748 
5749 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5750 				u32 reg)
5751 {
5752 	unsigned long flags, address, data;
5753 	u32 r;
5754 
5755 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5756 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5757 
5758 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5759 	WREG32(address, reg * 4);
5760 	(void)RREG32(address);
5761 	r = RREG32(data);
5762 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5763 	return r;
5764 }
5765 
5766 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5767 				u32 reg, u32 v)
5768 {
5769 	unsigned long flags, address, data;
5770 
5771 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5772 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5773 
5774 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5775 	WREG32(address, reg * 4);
5776 	(void)RREG32(address);
5777 	WREG32(data, v);
5778 	(void)RREG32(data);
5779 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5780 }
5781